From 06904d4dbb396239d111f0350fd93da129381622 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 1 Dec 2017 15:37:38 +0100 Subject: [PATCH 01/59] contrib: import system containers Signed-off-by: Antonio Murdaca --- contrib/system_containers/centos/Dockerfile | 29 ++ contrib/system_containers/centos/README.md | 57 +++ contrib/system_containers/centos/cccp.yml | 41 ++ .../centos/config.json.template | 427 +++++++++++++++++ .../system_containers/centos/manifest.json | 10 + contrib/system_containers/centos/run.sh | 8 + .../system_containers/centos/service.template | 20 + .../system_containers/centos/set_mounts.sh | 7 + .../centos/tmpfiles.template | 5 + contrib/system_containers/fedora/Dockerfile | 30 ++ contrib/system_containers/fedora/README.md | 53 +++ .../fedora/config.json.template | 432 ++++++++++++++++++ .../system_containers/fedora/manifest.json | 10 + contrib/system_containers/fedora/run.sh | 8 + .../system_containers/fedora/service.template | 20 + .../system_containers/fedora/set_mounts.sh | 7 + .../fedora/tmpfiles.template | 5 + contrib/system_containers/rhel/Dockerfile | 41 ++ .../rhel/config.json.template | 422 +++++++++++++++++ contrib/system_containers/rhel/help.md | 37 ++ contrib/system_containers/rhel/manifest.json | 10 + contrib/system_containers/rhel/run.sh | 8 + .../system_containers/rhel/service.template | 20 + contrib/system_containers/rhel/set_mounts.sh | 7 + .../system_containers/rhel/tmpfiles.template | 5 + 25 files changed, 1719 insertions(+) create mode 100644 contrib/system_containers/centos/Dockerfile create mode 100644 contrib/system_containers/centos/README.md create mode 100644 contrib/system_containers/centos/cccp.yml create mode 100644 contrib/system_containers/centos/config.json.template create mode 100644 contrib/system_containers/centos/manifest.json create mode 100755 contrib/system_containers/centos/run.sh create mode 100644 contrib/system_containers/centos/service.template create mode 100755 contrib/system_containers/centos/set_mounts.sh create mode 100644 contrib/system_containers/centos/tmpfiles.template create mode 100644 contrib/system_containers/fedora/Dockerfile create mode 100644 contrib/system_containers/fedora/README.md create mode 100644 contrib/system_containers/fedora/config.json.template create mode 100644 contrib/system_containers/fedora/manifest.json create mode 100755 contrib/system_containers/fedora/run.sh create mode 100644 contrib/system_containers/fedora/service.template create mode 100755 contrib/system_containers/fedora/set_mounts.sh create mode 100644 contrib/system_containers/fedora/tmpfiles.template create mode 100644 contrib/system_containers/rhel/Dockerfile create mode 100644 contrib/system_containers/rhel/config.json.template create mode 100644 contrib/system_containers/rhel/help.md create mode 100644 contrib/system_containers/rhel/manifest.json create mode 100755 contrib/system_containers/rhel/run.sh create mode 100644 contrib/system_containers/rhel/service.template create mode 100755 contrib/system_containers/rhel/set_mounts.sh create mode 100644 contrib/system_containers/rhel/tmpfiles.template diff --git a/contrib/system_containers/centos/Dockerfile b/contrib/system_containers/centos/Dockerfile new file mode 100644 index 00000000..0797fb14 --- /dev/null +++ b/contrib/system_containers/centos/Dockerfile @@ -0,0 +1,29 @@ +FROM centos + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + 
usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +RUN yum-config-manager --nogpgcheck --add-repo https://cbs.centos.org/repos/virt7-container-common-candidate/x86_64/os/ && \ + yum install --disablerepo=extras --nogpgcheck --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/centos/README.md b/contrib/system_containers/centos/README.md new file mode 100644 index 00000000..428bc6ff --- /dev/null +++ b/contrib/system_containers/centos/README.md @@ -0,0 +1,57 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. This flag will tell the atomic CLI to fall back to copying files to the +host instead. + +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` + +## Binary version + +You can find the image automatically built as: registry.centos.org/projectatomic/cri-o:latest diff --git a/contrib/system_containers/centos/cccp.yml b/contrib/system_containers/centos/cccp.yml new file mode 100644 index 00000000..ec4dab74 --- /dev/null +++ b/contrib/system_containers/centos/cccp.yml @@ -0,0 +1,41 @@ +# This is for the purpose of building containers on the CentOS Community Container +# Pipeline. The containers are built, tested and delivered to registry.centos.org and +# lifecycled as well. A corresponding entry must exist in the container index itself, +# located at https://github.com/CentOS/container-index/tree/master/index.d +# You can know more at the following links: +# * https://github.com/CentOS/container-pipeline-service/blob/master/README.md +# * https://github.com/CentOS/container-index/blob/master/README.rst +# * https://wiki.centos.org/ContainerPipeline + +# This will be part of the name of the container. 
It should match the job-id in index entry +job-id: cri-o + +#the following are optional, can be left blank +#defaults, where applicable are filled in +#nulecule-file : nulecule + +# This flag tells the container pipeline to skip user defined tests on their container +test-skip : True + +# This is path of the script that initiates the user defined tests. It must be able to +# return an exit code. +test-script : null + +# This is the path of custom build script. +build-script : null + +# This is the path of the custom delivery script +delivery-script : null + +# This flag tells the pipeline to deliver this container to docker hub. +docker-index : True + +# This flag can be used to enable or disable the custom delivery +custom-delivery : False + +# This flag can be used to enable or disable delivery of container to local registry +local-delivery : True + +Upstreams : + - ref : + url : diff --git a/contrib/system_containers/centos/config.json.template b/contrib/system_containers/centos/config.json.template new file mode 100644 index 00000000..785383d4 --- /dev/null +++ b/contrib/system_containers/centos/config.json.template @@ -0,0 +1,427 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + 
"CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + 
"rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/centos/manifest.json b/contrib/system_containers/centos/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/centos/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/centos/run.sh b/contrib/system_containers/centos/run.sh new file mode 100755 index 00000000..5621a7ba --- /dev/null +++ b/contrib/system_containers/centos/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/centos/service.template b/contrib/system_containers/centos/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/centos/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/centos/set_mounts.sh b/contrib/system_containers/centos/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/centos/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/centos/tmpfiles.template b/contrib/system_containers/centos/tmpfiles.template new file mode 100644 index 00000000..015919c4 --- /dev/null +++ b/contrib/system_containers/centos/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/${NAME} - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - 
+d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/fedora/Dockerfile b/contrib/system_containers/fedora/Dockerfile new file mode 100644 index 00000000..da12c6f0 --- /dev/null +++ b/contrib/system_containers/fedora/Dockerfile @@ -0,0 +1,30 @@ +FROM registry.fedoraproject.org/fedora:27 + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +COPY README.md / + +RUN dnf install --enablerepo=updates-testing --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + dnf clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/fedora/README.md b/contrib/system_containers/fedora/README.md new file mode 100644 index 00000000..6de39964 --- /dev/null +++ b/contrib/system_containers/fedora/README.md @@ -0,0 +1,53 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. This flag will tell the atomic CLI to fall back to copying files to the +host instead. 
+ +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` diff --git a/contrib/system_containers/fedora/config.json.template b/contrib/system_containers/fedora/config.json.template new file mode 100644 index 00000000..0642fbc1 --- /dev/null +++ b/contrib/system_containers/fedora/config.json.template @@ -0,0 +1,432 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + 
"CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ] + }, + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": 
"bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/fedora/manifest.json b/contrib/system_containers/fedora/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/fedora/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/fedora/run.sh b/contrib/system_containers/fedora/run.sh new file mode 100755 index 00000000..5621a7ba --- /dev/null +++ b/contrib/system_containers/fedora/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/fedora/service.template b/contrib/system_containers/fedora/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/fedora/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/fedora/set_mounts.sh b/contrib/system_containers/fedora/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/fedora/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/fedora/tmpfiles.template b/contrib/system_containers/fedora/tmpfiles.template new file mode 100644 index 00000000..015919c4 --- /dev/null +++ b/contrib/system_containers/fedora/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/${NAME} - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/rhel/Dockerfile b/contrib/system_containers/rhel/Dockerfile new file mode 100644 index 00000000..3c113fda --- /dev/null +++ b/contrib/system_containers/rhel/Dockerfile @@ -0,0 +1,41 @@ +#oit## This file is managed by the OpenShift Image Tool +#oit## by the OpenShift Continuous Delivery team. +#oit## +#oit## Any yum repos listed in this file will effectively be ignored during CD builds. 
+#oit## Yum repos must be enabled in the oit configuration files. +#oit## Some aspects of this file may be managed programmatically. For example, the image name, labels (version, +#oit## release, and other), and the base FROM. Changes made directly in distgit may be lost during the next +#oit## reconciliation. +#oit## +FROM rhel7:7-released + +RUN \ + yum install --setopt=tsflags=nodocs -y socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + rpm -V socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] + +LABEL \ + com.redhat.component="cri-o-docker" \ + io.k8s.description="CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry." \ + maintainer="Jhon Honce " \ + name="openshift3/cri-o" \ + License="GPLv2+" \ + io.k8s.display-name="CRI-O" \ + summary="OCI-based implementation of Kubernetes Container Runtime Interface" \ + release="0.13.0.0" \ + version="v3.8.0" \ + architecture="x86_64" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + vendor="Red Hat" \ + io.openshift.tags="cri-o system rhel7" \ + atomic.type="system" diff --git a/contrib/system_containers/rhel/config.json.template b/contrib/system_containers/rhel/config.json.template new file mode 100644 index 00000000..a5eb001e --- /dev/null +++ b/contrib/system_containers/rhel/config.json.template @@ -0,0 +1,422 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + 
"CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [{ + "type": "mount" + }], + "resources": { + "devices": [{ + "access": "rwm", + "allow": true + }] + }, + "rootfsPropagation": "private" + }, + "mounts": [{ + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": 
"/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/rhel/help.md b/contrib/system_containers/rhel/help.md new file mode 100644 index 00000000..e46702e7 --- /dev/null +++ b/contrib/system_containers/rhel/help.md @@ -0,0 +1,37 @@ +% CRI-O (1) Container Image Pages +% Jhon Honce +% September 7, 2017 + +# NAME +cri-o - OCI-based implementation of Kubernetes Container Runtime Interface + +# DESCRIPTION +CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry. 
+ +You can find more information on the CRI-O project at + +# USAGE +Pull from local docker and install system container: + +``` +# atomic pull --storage ostree docker:openshift3/cri-o:latest +# atomic install --system --system-package=no --name cri-o openshift3/cri-o +``` + +Start and enable as a systemd service: +``` +# systemctl enable --now cri-o +``` + +Stopping the service +``` +# systemctl stop cri-o +``` + +Removing the container +``` +# atomic uninstall cri-o +``` + +# SEE ALSO +man systemd(1) diff --git a/contrib/system_containers/rhel/manifest.json b/contrib/system_containers/rhel/manifest.json new file mode 100644 index 00000000..727abf9e --- /dev/null +++ b/contrib/system_containers/rhel/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL": "info", + "OPT_CNI": "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE": "/var/lib/containers/storage", + "VAR_LIB_ORIGIN": "/var/lib/origin", + "VAR_LIB_KUBE": "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/rhel/run.sh b/contrib/system_containers/rhel/run.sh new file mode 100755 index 00000000..5621a7ba --- /dev/null +++ b/contrib/system_containers/rhel/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/rhel/service.template b/contrib/system_containers/rhel/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/rhel/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/rhel/set_mounts.sh b/contrib/system_containers/rhel/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/rhel/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/rhel/tmpfiles.template b/contrib/system_containers/rhel/tmpfiles.template new file mode 100644 index 00000000..015919c4 --- /dev/null +++ b/contrib/system_containers/rhel/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/${NAME} - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - From 21930bfe2b2e1ec86c3ec6834db35a5864b7f9d3 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Mon, 11 Dec 2017 16:14:38 +0100 Subject: [PATCH 02/59] vendor: fix deps for release-1.9 Signed-off-by: Antonio Murdaca --- vendor.conf | 14 +- .../api/admissionregistration/v1alpha1/doc.go | 4 +- .../v1alpha1/generated.pb.go | 1284 +--- .../v1alpha1/generated.proto | 96 +- .../v1alpha1/register.go | 2 - 
.../admissionregistration/v1alpha1/types.go | 117 - .../v1alpha1/types_swagger_doc_generated.go | 62 - .../v1alpha1/zz_generated.deepcopy.go | 216 - .../api/admissionregistration/v1beta1/doc.go | 25 + .../v1beta1/generated.pb.go | 2150 +++++++ .../v1beta1/generated.proto | 261 + .../admissionregistration/v1beta1/register.go | 53 + .../admissionregistration/v1beta1/types.go | 281 + .../v1beta1/types_swagger_doc_generated.go | 125 + .../v1beta1/zz_generated.deepcopy.go | 321 + vendor/k8s.io/api/apps/v1/doc.go | 2 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 5553 +++++++++++++++- vendor/k8s.io/api/apps/v1/generated.proto | 524 +- vendor/k8s.io/api/apps/v1/register.go | 10 +- vendor/k8s.io/api/apps/v1/types.go | 624 +- .../apps/v1/types_swagger_doc_generated.go | 267 +- .../api/apps/v1/zz_generated.deepcopy.go | 696 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 2 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 585 +- .../k8s.io/api/apps/v1beta1/generated.proto | 28 + vendor/k8s.io/api/apps/v1beta1/register.go | 2 +- vendor/k8s.io/api/apps/v1beta1/types.go | 26 + .../v1beta1/types_swagger_doc_generated.go | 14 + .../api/apps/v1beta1/zz_generated.deepcopy.go | 119 +- vendor/k8s.io/api/apps/v1beta2/doc.go | 2 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 1194 +++- .../k8s.io/api/apps/v1beta2/generated.proto | 66 +- vendor/k8s.io/api/apps/v1beta2/register.go | 2 +- vendor/k8s.io/api/apps/v1beta2/types.go | 72 +- .../v1beta2/types_swagger_doc_generated.go | 36 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 179 +- vendor/k8s.io/api/authentication/v1/doc.go | 2 +- .../api/authentication/v1/generated.proto | 1 + .../k8s.io/api/authentication/v1/register.go | 2 +- .../v1/zz_generated.deepcopy.go | 31 - .../k8s.io/api/authentication/v1beta1/doc.go | 2 +- .../authentication/v1beta1/generated.proto | 1 + .../api/authentication/v1beta1/register.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 31 - vendor/k8s.io/api/authorization/v1/doc.go | 2 +- .../api/authorization/v1/generated.pb.go | 172 +- .../api/authorization/v1/generated.proto | 10 +- .../k8s.io/api/authorization/v1/register.go | 2 +- vendor/k8s.io/api/authorization/v1/types.go | 8 +- .../v1/types_swagger_doc_generated.go | 3 +- .../authorization/v1/zz_generated.deepcopy.go | 67 - .../k8s.io/api/authorization/v1beta1/doc.go | 2 +- .../api/authorization/v1beta1/generated.pb.go | 177 +- .../api/authorization/v1beta1/generated.proto | 10 +- .../api/authorization/v1beta1/register.go | 2 +- .../k8s.io/api/authorization/v1beta1/types.go | 8 +- .../v1beta1/types_swagger_doc_generated.go | 3 +- .../v1beta1/zz_generated.deepcopy.go | 67 - vendor/k8s.io/api/autoscaling/v1/doc.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 1 + vendor/k8s.io/api/autoscaling/v1/register.go | 2 +- .../autoscaling/v1/zz_generated.deepcopy.go | 83 - vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 2 +- .../api/autoscaling/v2beta1/generated.proto | 1 + .../api/autoscaling/v2beta1/register.go | 2 +- .../v2beta1/zz_generated.deepcopy.go | 71 - vendor/k8s.io/api/batch/v1/doc.go | 2 +- vendor/k8s.io/api/batch/v1/generated.proto | 1 + vendor/k8s.io/api/batch/v1/register.go | 2 +- .../api/batch/v1/zz_generated.deepcopy.go | 35 - vendor/k8s.io/api/batch/v1beta1/doc.go | 2 +- .../k8s.io/api/batch/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/batch/v1beta1/register.go | 2 +- vendor/k8s.io/api/batch/v1beta1/types.go | 5 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../batch/v1beta1/zz_generated.deepcopy.go | 39 - vendor/k8s.io/api/batch/v2alpha1/doc.go | 2 +- 
.../k8s.io/api/batch/v2alpha1/generated.proto | 6 +- vendor/k8s.io/api/batch/v2alpha1/register.go | 2 +- vendor/k8s.io/api/batch/v2alpha1/types.go | 5 +- .../v2alpha1/types_swagger_doc_generated.go | 2 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 39 - vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 +- .../api/certificates/v1beta1/generated.proto | 1 + .../api/certificates/v1beta1/register.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 35 - .../api/core/v1/annotation_key_constants.go | 6 - vendor/k8s.io/api/core/v1/doc.go | 2 +- vendor/k8s.io/api/core/v1/generated.pb.go | 4741 ++++++++++---- vendor/k8s.io/api/core/v1/generated.proto | 259 +- vendor/k8s.io/api/core/v1/register.go | 2 +- vendor/k8s.io/api/core/v1/types.go | 235 +- .../core/v1/types_swagger_doc_generated.go | 126 +- .../api/core/v1/zz_generated.deepcopy.go | 943 +-- .../events/v1beta1}/doc.go | 8 +- .../k8s.io/api/events/v1beta1/generated.pb.go | 1306 ++++ .../k8s.io/api/events/v1beta1/generated.proto | 122 + vendor/k8s.io/api/events/v1beta1/register.go | 53 + vendor/k8s.io/api/events/v1beta1/types.go | 122 + .../v1beta1/types_swagger_doc_generated.go | 73 + .../events/v1beta1/zz_generated.deepcopy.go | 127 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 2071 +++--- .../api/extensions/v1beta1/generated.proto | 104 +- .../k8s.io/api/extensions/v1beta1/register.go | 6 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 123 +- .../v1beta1/types_swagger_doc_generated.go | 81 +- .../v1beta1/zz_generated.deepcopy.go | 428 +- vendor/k8s.io/api/networking/v1/doc.go | 2 +- .../k8s.io/api/networking/v1/generated.proto | 1 + vendor/k8s.io/api/networking/v1/register.go | 2 +- .../networking/v1/zz_generated.deepcopy.go | 47 - vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.proto | 1 + vendor/k8s.io/api/policy/v1beta1/register.go | 2 +- .../policy/v1beta1/zz_generated.deepcopy.go | 35 - vendor/k8s.io/api/rbac/v1/doc.go | 2 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 348 +- vendor/k8s.io/api/rbac/v1/generated.proto | 16 + vendor/k8s.io/api/rbac/v1/register.go | 2 +- vendor/k8s.io/api/rbac/v1/types.go | 14 + .../rbac/v1/types_swagger_doc_generated.go | 16 +- .../api/rbac/v1/zz_generated.deepcopy.go | 84 +- vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 +- .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 350 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 15 + vendor/k8s.io/api/rbac/v1alpha1/register.go | 2 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 14 + .../v1alpha1/types_swagger_doc_generated.go | 16 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 84 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 349 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 16 + vendor/k8s.io/api/rbac/v1beta1/register.go | 2 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 13 + .../v1beta1/types_swagger_doc_generated.go | 16 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 84 +- vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 +- .../api/scheduling/v1alpha1/generated.proto | 1 + .../api/scheduling/v1alpha1/register.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 23 - vendor/k8s.io/api/settings/v1alpha1/doc.go | 2 +- .../api/settings/v1alpha1/generated.proto | 1 + .../k8s.io/api/settings/v1alpha1/register.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 27 - vendor/k8s.io/api/storage/v1/doc.go | 2 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 120 +- vendor/k8s.io/api/storage/v1/generated.proto | 10 +- 
vendor/k8s.io/api/storage/v1/register.go | 2 +- vendor/k8s.io/api/storage/v1/types.go | 22 + .../storage/v1/types_swagger_doc_generated.go | 1 + .../api/storage/v1/zz_generated.deepcopy.go | 32 +- vendor/k8s.io/api/storage/v1alpha1/doc.go | 20 + .../api/storage/v1alpha1/generated.pb.go | 1523 +++++ .../api/storage/v1alpha1/generated.proto | 128 + .../k8s.io/api/storage/v1alpha1/register.go | 50 + vendor/k8s.io/api/storage/v1alpha1/types.go | 126 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../storage/v1alpha1/zz_generated.deepcopy.go | 188 + vendor/k8s.io/api/storage/v1beta1/doc.go | 2 +- .../api/storage/v1beta1/generated.pb.go | 119 +- .../api/storage/v1beta1/generated.proto | 10 +- vendor/k8s.io/api/storage/v1beta1/register.go | 2 +- vendor/k8s.io/api/storage/v1beta1/types.go | 22 + .../v1beta1/types_swagger_doc_generated.go | 1 + .../storage/v1beta1/zz_generated.deepcopy.go | 32 +- .../k8s.io/apiextensions-apiserver/README.md | 2 +- .../pkg/features/kube_features.go | 3 +- .../apimachinery/pkg/api/errors/errors.go | 33 +- .../apimachinery/pkg/api/meta/interfaces.go | 3 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 121 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 19 +- .../apimachinery/pkg/api/meta/unstructured.go | 18 +- .../pkg/api/resource/zz_generated.deepcopy.go | 17 - .../pkg/apimachinery/registered/registered.go | 2 +- .../apimachinery/pkg/apimachinery/types.go | 2 +- .../internalversion/zz_generated.deepcopy.go | 18 - .../pkg/apis/meta/v1/conversion.go | 7 + .../pkg/apis/meta/v1/generated.pb.go | 378 +- .../pkg/apis/meta/v1/generated.proto | 31 +- .../apimachinery/pkg/apis/meta/v1/register.go | 2 - .../apimachinery/pkg/apis/meta/v1/types.go | 23 +- .../meta/v1/types_swagger_doc_generated.go | 4 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 453 ++ .../apis/meta/v1/unstructured/unstructured.go | 607 +- .../meta/v1/unstructured/unstructured_list.go | 189 + .../v1/unstructured/zz_generated.deepcopy.go | 18 - .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 154 - .../meta/v1alpha1/zz_generated.deepcopy.go | 38 - .../apimachinery/pkg/conversion/cloner.go | 249 - .../pkg/labels/zz_generated.deepcopy.go | 17 - .../k8s.io/apimachinery/pkg/runtime/codec.go | 16 +- .../unstructured => runtime}/converter.go | 65 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 4 +- .../apimachinery/pkg/runtime/interfaces.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 180 +- .../pkg/runtime/serializer/json/json.go | 3 +- .../pkg/runtime/zz_generated.deepcopy.go | 25 - .../pkg/util/mergepatch/errors.go | 1 + .../k8s.io/apimachinery/pkg/util/net/http.go | 12 +- .../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 + .../pkg/util/strategicpatch/patch.go | 320 +- .../pkg/util/strategicpatch/types.go | 193 + .../pkg/watch/zz_generated.deepcopy.go | 17 - .../third_party/forked/golang/json/fields.go | 9 +- .../client-go/discovery/unstructured.go | 66 +- .../admissionregistration/interface.go | 54 + .../v1alpha1/initializerconfiguration.go | 87 + .../v1alpha1/interface.go | 45 + .../v1beta1/interface.go | 52 + .../v1beta1/mutatingwebhookconfiguration.go | 87 + .../v1beta1/validatingwebhookconfiguration.go | 87 + .../client-go/informers/apps/interface.go | 62 + .../informers/apps/v1/controllerrevision.go | 88 + .../client-go/informers/apps/v1/daemonset.go | 88 + .../client-go/informers/apps/v1/deployment.go | 88 + .../client-go/informers/apps/v1/interface.go | 73 + .../client-go/informers/apps/v1/replicaset.go | 88 + .../informers/apps/v1/statefulset.go | 
88 + .../apps/v1beta1/controllerrevision.go | 88 + .../informers/apps/v1beta1/deployment.go | 88 + .../informers/apps/v1beta1/interface.go | 59 + .../informers/apps/v1beta1/statefulset.go | 88 + .../apps/v1beta2/controllerrevision.go | 88 + .../informers/apps/v1beta2/daemonset.go | 88 + .../informers/apps/v1beta2/deployment.go | 88 + .../informers/apps/v1beta2/interface.go | 73 + .../informers/apps/v1beta2/replicaset.go | 88 + .../informers/apps/v1beta2/statefulset.go | 88 + .../informers/autoscaling/interface.go | 54 + .../autoscaling/v1/horizontalpodautoscaler.go | 88 + .../informers/autoscaling/v1/interface.go | 45 + .../v2beta1/horizontalpodautoscaler.go | 88 + .../autoscaling/v2beta1/interface.go | 45 + .../client-go/informers/batch/interface.go | 62 + .../client-go/informers/batch/v1/interface.go | 45 + .../client-go/informers/batch/v1/job.go | 88 + .../informers/batch/v1beta1/cronjob.go | 88 + .../informers/batch/v1beta1/interface.go | 45 + .../informers/batch/v2alpha1/cronjob.go | 88 + .../informers/batch/v2alpha1/interface.go | 45 + .../informers/certificates/interface.go | 46 + .../v1beta1/certificatesigningrequest.go | 87 + .../certificates/v1beta1/interface.go | 45 + .../client-go/informers/core/interface.go | 46 + .../informers/core/v1/componentstatus.go | 87 + .../client-go/informers/core/v1/configmap.go | 88 + .../client-go/informers/core/v1/endpoints.go | 88 + .../client-go/informers/core/v1/event.go | 88 + .../client-go/informers/core/v1/interface.go | 150 + .../client-go/informers/core/v1/limitrange.go | 88 + .../client-go/informers/core/v1/namespace.go | 87 + .../client-go/informers/core/v1/node.go | 87 + .../informers/core/v1/persistentvolume.go | 87 + .../core/v1/persistentvolumeclaim.go | 88 + .../k8s.io/client-go/informers/core/v1/pod.go | 88 + .../informers/core/v1/podtemplate.go | 88 + .../core/v1/replicationcontroller.go | 88 + .../informers/core/v1/resourcequota.go | 88 + .../client-go/informers/core/v1/secret.go | 88 + .../client-go/informers/core/v1/service.go | 88 + .../informers/core/v1/serviceaccount.go | 88 + .../client-go/informers/events/interface.go | 46 + .../informers/events/v1beta1/event.go | 88 + .../informers/events/v1beta1/interface.go | 45 + .../informers/extensions/interface.go | 46 + .../informers/extensions/v1beta1/daemonset.go | 88 + .../extensions/v1beta1/deployment.go | 88 + .../informers/extensions/v1beta1/ingress.go | 88 + .../informers/extensions/v1beta1/interface.go | 73 + .../extensions/v1beta1/podsecuritypolicy.go | 87 + .../extensions/v1beta1/replicaset.go | 88 + vendor/k8s.io/client-go/informers/factory.go | 208 + vendor/k8s.io/client-go/informers/generic.go | 254 + .../internalinterfaces/factory_interfaces.go | 37 + .../informers/networking/interface.go | 46 + .../informers/networking/v1/interface.go | 45 + .../informers/networking/v1/networkpolicy.go | 88 + .../client-go/informers/policy/interface.go | 46 + .../informers/policy/v1beta1/interface.go | 45 + .../policy/v1beta1/poddisruptionbudget.go | 88 + .../client-go/informers/rbac/interface.go | 62 + .../informers/rbac/v1/clusterrole.go | 87 + .../informers/rbac/v1/clusterrolebinding.go | 87 + .../client-go/informers/rbac/v1/interface.go | 66 + .../client-go/informers/rbac/v1/role.go | 88 + .../informers/rbac/v1/rolebinding.go | 88 + .../informers/rbac/v1alpha1/clusterrole.go | 87 + .../rbac/v1alpha1/clusterrolebinding.go | 87 + .../informers/rbac/v1alpha1/interface.go | 66 + .../client-go/informers/rbac/v1alpha1/role.go | 88 + .../informers/rbac/v1alpha1/rolebinding.go | 88 + 
.../informers/rbac/v1beta1/clusterrole.go | 87 + .../rbac/v1beta1/clusterrolebinding.go | 87 + .../informers/rbac/v1beta1/interface.go | 66 + .../client-go/informers/rbac/v1beta1/role.go | 88 + .../informers/rbac/v1beta1/rolebinding.go | 88 + .../informers/scheduling/interface.go | 46 + .../scheduling/v1alpha1/interface.go | 45 + .../scheduling/v1alpha1/priorityclass.go | 87 + .../client-go/informers/settings/interface.go | 46 + .../informers/settings/v1alpha1/interface.go | 45 + .../informers/settings/v1alpha1/podpreset.go | 88 + .../client-go/informers/storage/interface.go | 62 + .../informers/storage/v1/interface.go | 45 + .../informers/storage/v1/storageclass.go | 87 + .../informers/storage/v1alpha1/interface.go | 45 + .../storage/v1alpha1/volumeattachment.go | 87 + .../informers/storage/v1beta1/interface.go | 45 + .../informers/storage/v1beta1/storageclass.go | 87 + .../k8s.io/client-go/kubernetes/clientset.go | 56 +- .../client-go/kubernetes/scheme/register.go | 6 + .../v1alpha1/admissionregistration_client.go | 5 - .../externaladmissionhookconfiguration.go | 145 - .../v1alpha1/generated_expansion.go | 2 - .../v1beta1/admissionregistration_client.go | 93 + .../admissionregistration/v1beta1/doc.go | 18 + .../v1beta1/generated_expansion.go | 21 + .../v1beta1/mutatingwebhookconfiguration.go | 145 + .../v1beta1/validatingwebhookconfiguration.go | 145 + .../kubernetes/typed/apps/v1/apps_client.go | 20 + .../typed/apps/v1/controllerrevision.go | 155 + .../kubernetes/typed/apps/v1/deployment.go | 172 + .../typed/apps/v1/generated_expansion.go | 8 + .../kubernetes/typed/apps/v1/replicaset.go | 172 + .../kubernetes/typed/apps/v1/statefulset.go | 172 + .../kubernetes/typed/events/v1beta1/doc.go | 18 + .../kubernetes/typed/events/v1beta1/event.go | 155 + .../typed/events/v1beta1/events_client.go | 88 + .../events/v1beta1/generated_expansion.go | 19 + .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/thirdpartyresource.go | 145 - .../kubernetes/typed/storage/v1alpha1/doc.go | 18 + .../storage/v1alpha1/generated_expansion.go | 19 + .../typed/storage/v1alpha1/storage_client.go | 88 + .../storage/v1alpha1/volumeattachment.go | 161 + .../v1alpha1/expansion_generated.go | 23 + .../v1alpha1/initializerconfiguration.go | 65 + .../v1beta1/expansion_generated.go | 27 + .../v1beta1/mutatingwebhookconfiguration.go | 65 + .../v1beta1/validatingwebhookconfiguration.go | 65 + .../listers/apps/v1/controllerrevision.go | 94 + .../client-go/listers/apps/v1/daemonset.go | 94 + .../listers/apps/v1/daemonset_expansion.go | 113 + .../client-go/listers/apps/v1/deployment.go | 94 + .../listers/apps/v1/deployment_expansion.go | 70 + .../listers/apps/v1/expansion_generated.go | 27 + .../client-go/listers/apps/v1/replicaset.go | 94 + .../listers/apps/v1/replicaset_expansion.go | 73 + .../client-go/listers/apps/v1/statefulset.go | 94 + .../listers/apps/v1/statefulset_expansion.go | 77 + .../apps/v1beta1/controllerrevision.go | 94 + .../listers/apps/v1beta1/deployment.go | 94 + .../apps/v1beta1/expansion_generated.go | 43 + .../client-go/listers/apps/v1beta1/scale.go | 94 + .../listers/apps/v1beta1/statefulset.go | 94 + .../apps/v1beta1/statefulset_expansion.go | 77 + .../apps/v1beta2/controllerrevision.go | 94 + .../listers/apps/v1beta2/daemonset.go | 94 + .../apps/v1beta2/daemonset_expansion.go | 113 + .../listers/apps/v1beta2/deployment.go | 94 + .../apps/v1beta2/deployment_expansion.go | 70 + .../apps/v1beta2/expansion_generated.go | 35 + 
.../listers/apps/v1beta2/replicaset.go | 94 + .../apps/v1beta2/replicaset_expansion.go | 73 + .../client-go/listers/apps/v1beta2/scale.go | 94 + .../listers/apps/v1beta2/statefulset.go | 94 + .../apps/v1beta2/statefulset_expansion.go | 77 + .../autoscaling/v1/expansion_generated.go | 27 + .../autoscaling/v1/horizontalpodautoscaler.go | 94 + .../v2beta1/expansion_generated.go | 27 + .../v2beta1/horizontalpodautoscaler.go | 94 + .../listers/batch/v1/expansion_generated.go | 19 + .../k8s.io/client-go/listers/batch/v1/job.go | 94 + .../listers/batch/v1/job_expansion.go | 68 + .../listers/batch/v1beta1/cronjob.go | 94 + .../batch/v1beta1/expansion_generated.go | 27 + .../listers/batch/v2alpha1/cronjob.go | 94 + .../batch/v2alpha1/expansion_generated.go | 27 + .../v1beta1/certificatesigningrequest.go | 65 + .../v1beta1/expansion_generated.go | 23 + .../listers/core/v1/componentstatus.go | 65 + .../client-go/listers/core/v1/configmap.go | 94 + .../client-go/listers/core/v1/endpoints.go | 94 + .../k8s.io/client-go/listers/core/v1/event.go | 94 + .../listers/core/v1/expansion_generated.go | 111 + .../client-go/listers/core/v1/limitrange.go | 94 + .../client-go/listers/core/v1/namespace.go | 65 + .../k8s.io/client-go/listers/core/v1/node.go | 65 + .../listers/core/v1/node_expansion.go | 48 + .../listers/core/v1/persistentvolume.go | 65 + .../listers/core/v1/persistentvolumeclaim.go | 94 + .../k8s.io/client-go/listers/core/v1/pod.go | 94 + .../client-go/listers/core/v1/podtemplate.go | 94 + .../listers/core/v1/replicationcontroller.go | 94 + .../v1/replicationcontroller_expansion.go | 66 + .../listers/core/v1/resourcequota.go | 94 + .../client-go/listers/core/v1/secret.go | 94 + .../client-go/listers/core/v1/service.go | 94 + .../listers/core/v1/service_expansion.go | 56 + .../listers/core/v1/serviceaccount.go | 94 + .../client-go/listers/events/v1beta1/event.go | 94 + .../events/v1beta1/expansion_generated.go | 27 + .../listers/extensions/v1beta1/daemonset.go | 94 + .../extensions/v1beta1/daemonset_expansion.go | 114 + .../listers/extensions/v1beta1/deployment.go | 94 + .../v1beta1/deployment_expansion.go | 70 + .../extensions/v1beta1/expansion_generated.go | 39 + .../listers/extensions/v1beta1/ingress.go | 94 + .../extensions/v1beta1/podsecuritypolicy.go | 65 + .../listers/extensions/v1beta1/replicaset.go | 94 + .../v1beta1/replicaset_expansion.go | 73 + .../listers/extensions/v1beta1/scale.go | 94 + .../networking/v1/expansion_generated.go | 27 + .../listers/networking/v1/networkpolicy.go | 94 + .../listers/policy/v1beta1/eviction.go | 94 + .../policy/v1beta1/expansion_generated.go | 27 + .../policy/v1beta1/poddisruptionbudget.go | 94 + .../v1beta1/poddisruptionbudget_expansion.go | 74 + .../client-go/listers/rbac/v1/clusterrole.go | 65 + .../listers/rbac/v1/clusterrolebinding.go | 65 + .../listers/rbac/v1/expansion_generated.go | 43 + .../k8s.io/client-go/listers/rbac/v1/role.go | 94 + .../client-go/listers/rbac/v1/rolebinding.go | 94 + .../listers/rbac/v1alpha1/clusterrole.go | 65 + .../rbac/v1alpha1/clusterrolebinding.go | 65 + .../rbac/v1alpha1/expansion_generated.go | 43 + .../client-go/listers/rbac/v1alpha1/role.go | 94 + .../listers/rbac/v1alpha1/rolebinding.go | 94 + .../listers/rbac/v1beta1/clusterrole.go | 65 + .../rbac/v1beta1/clusterrolebinding.go | 65 + .../rbac/v1beta1/expansion_generated.go | 43 + .../client-go/listers/rbac/v1beta1/role.go | 94 + .../listers/rbac/v1beta1/rolebinding.go | 94 + .../v1alpha1/expansion_generated.go | 23 + .../scheduling/v1alpha1/priorityclass.go | 65 + 
.../settings/v1alpha1/expansion_generated.go | 27 + .../listers/settings/v1alpha1/podpreset.go | 94 + .../listers/storage/v1/expansion_generated.go | 23 + .../listers/storage/v1/storageclass.go | 65 + .../storage/v1alpha1/expansion_generated.go | 23 + .../storage/v1alpha1/volumeattachment.go | 65 + .../storage/v1beta1/expansion_generated.go | 23 + .../listers/storage/v1beta1/storageclass.go | 65 + vendor/k8s.io/client-go/pkg/version/base.go | 8 +- vendor/k8s.io/client-go/rest/request.go | 20 + .../client-go/rest/zz_generated.deepcopy.go | 17 - .../k8s.io/client-go/tools/cache/reflector.go | 13 +- .../client-go/tools/clientcmd/api/doc.go | 2 +- .../clientcmd/api/zz_generated.deepcopy.go | 39 - .../tools/remotecommand/remotecommand.go | 26 +- vendor/k8s.io/client-go/transport/cache.go | 2 +- vendor/k8s.io/kube-openapi/README.md | 18 +- .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 275 + .../kube-openapi/pkg/util/proto/openapi.go | 251 + .../k8s.io/kubernetes/pkg/api/service/util.go | 5 +- .../pkg/api/v1/zz_generated.conversion.go | 5430 ---------------- .../kubernetes/pkg/api/validation/events.go | 84 - .../pkg/apis/autoscaling/annotations.go | 34 + .../kubernetes/pkg/apis/autoscaling/doc.go | 19 + .../pkg/apis/autoscaling/register.go | 53 + .../kubernetes/pkg/apis/autoscaling/types.go | 363 ++ .../apis/autoscaling/zz_generated.deepcopy.go | 481 ++ .../core}/annotation_key_constants.go | 6 +- .../kubernetes/pkg/{api => apis/core}/doc.go | 4 +- .../pkg/{api => apis/core}/field_constants.go | 6 +- .../pkg/{api => apis/core}/helper/helpers.go | 341 +- .../pkg/{api => apis/core}/install/install.go | 11 +- .../kubernetes/pkg/{api => apis/core}/json.go | 2 +- .../pkg/{api => apis/core}/objectreference.go | 2 +- .../kubernetes/pkg/apis/core/pods/helpers.go | 63 + .../pkg/{api => apis/core}/register.go | 2 +- .../pkg/{api => apis/core}/resource.go | 2 +- .../pkg/{api => apis/core}/taint.go | 2 +- .../pkg/{api => apis/core}/toleration.go | 2 +- .../pkg/{api => apis/core}/types.go | 225 +- .../pkg/{api => apis/core}/v1/conversion.go | 151 +- .../pkg/{api => apis/core}/v1/defaults.go | 23 +- .../pkg/{api => apis/core}/v1/doc.go | 8 +- .../{api => apis/core}/v1/helper/helpers.go | 23 +- .../pkg/{api => apis/core}/v1/register.go | 0 .../apis/core/v1/zz_generated.conversion.go | 5620 +++++++++++++++++ .../core}/v1/zz_generated.defaults.go | 12 +- .../pkg/{api => apis/core}/validation/doc.go | 2 +- .../pkg/apis/core/validation/events.go | 129 + .../core}/validation/validation.go | 1127 ++-- .../core}/zz_generated.deepcopy.go | 945 +-- .../kubernetes/pkg/apis/extensions/doc.go | 2 +- .../pkg/apis/extensions/register.go | 7 +- .../kubernetes/pkg/apis/extensions/types.go | 155 +- .../apis/extensions/zz_generated.deepcopy.go | 480 +- .../kubernetes/pkg/apis/networking/doc.go | 2 +- .../kubernetes/pkg/apis/networking/types.go | 2 +- .../apis/networking/zz_generated.deepcopy.go | 51 +- .../kubernetes/pkg/cloudprovider/README.md | 4 +- .../kubernetes/pkg/cloudprovider/cloud.go | 6 + .../pkg/controller/client_builder.go | 19 +- .../pkg/controller/controller_utils.go | 8 +- .../kubernetes/pkg/features/kube_features.go | 68 +- vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go | 19 + .../kubernetes/pkg/fieldpath/fieldpath.go | 103 + .../apis/cri/v1alpha1/runtime/constants.go | 28 + .../pkg/kubelet/container/helpers.go | 4 +- .../pkg/kubelet/container/runtime.go | 9 +- .../kubelet/network/hostport/fake_iptables.go | 24 +- .../network/hostport/hostport_manager.go | 13 + 
.../network/hostport/hostport_syncer.go | 3 +- .../kubelet/server/portforward/httpstream.go | 2 +- .../kubelet/server/portforward/websocket.go | 2 +- .../server/remotecommand/httpstream.go | 2 +- .../pkg/kubelet/types/pod_update.go | 4 +- .../pkg/proxy/healthcheck/healthcheck.go | 2 +- .../kubernetes/pkg/proxy/iptables/proxier.go | 125 +- .../kubernetes/pkg/proxy/util/conntrack.go | 8 +- .../kubernetes/pkg/proxy/util/endpoints.go | 26 +- .../k8s.io/kubernetes/pkg/proxy/util/utils.go | 4 +- .../kubernetes/pkg/serviceaccount/util.go | 2 +- .../kubernetes/pkg/util/iptables/iptables.go | 4 +- .../kubernetes/pkg/util/mount/exec_mount.go | 140 + .../pkg/util/mount/exec_mount_unsupported.go | 87 + .../kubernetes/pkg/util/mount/mount_linux.go | 6 +- .../pkg/util/mount/mount_unsupported.go | 2 +- .../kubernetes/pkg/util/pointer/pointer.go | 6 + .../kubernetes/pkg/util/taints/taints.go | 4 +- .../k8s.io/kubernetes/pkg/volume/plugins.go | 61 + .../kubernetes/pkg/volume/util/error.go | 41 + .../kubernetes/pkg/volume/util/finalizer.go | 68 + .../k8s.io/kubernetes/pkg/volume/util/util.go | 214 +- .../kubernetes/pkg/volume/util/util_linux.go | 106 + .../pkg/volume/util/util_unsupported.go | 39 + vendor/k8s.io/kubernetes/pkg/volume/volume.go | 47 +- 534 files changed, 52933 insertions(+), 17527 deletions(-) create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{apimachinery/pkg/conversion/unstructured => api/events/v1beta1}/doc.go (75%) create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/events/v1beta1/register.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/cloner.go rename vendor/k8s.io/apimachinery/pkg/{conversion/unstructured => runtime}/converter.go (90%) create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/apps/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/interface.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1/job.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/interface.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/core/interface.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/componentstatus.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/configmap.go create mode 100644 
vendor/k8s.io/client-go/informers/core/v1/endpoints.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/event.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/limitrange.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/namespace.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/node.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/pod.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/podtemplate.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/resourcequota.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/secret.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/service.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go create mode 100644 vendor/k8s.io/client-go/informers/events/interface.go create mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go create mode 100644 vendor/k8s.io/client-go/informers/factory.go create mode 100644 vendor/k8s.io/client-go/informers/generic.go create mode 100644 vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go create mode 100644 vendor/k8s.io/client-go/informers/networking/interface.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/policy/interface.go create mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go create mode 100644 
vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/interface.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/informers/settings/interface.go create mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/informers/storage/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/storageclass.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go create mode 
100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1/job.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go create mode 
100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/componentstatus.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/configmap.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/endpoints.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/event.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/limitrange.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/namespace.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/node.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/node_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/pod.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/podtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/resourcequota.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/secret.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/service.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/service_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go create mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go create mode 
100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/storageclass.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/events.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/annotation_key_constants.go (95%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/doc.go (91%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/field_constants.go (92%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/helper/helpers.go (58%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/install/install.go (89%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/json.go (98%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/objectreference.go (98%) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/register.go (99%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/resource.go (99%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/taint.go (98%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/toleration.go (98%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/types.go (95%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/conversion.go (72%) rename vendor/k8s.io/kubernetes/pkg/{api => 
apis/core}/v1/defaults.go (94%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/doc.go (73%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/helper/helpers.go (94%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/register.go (100%) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/zz_generated.defaults.go (97%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/validation/doc.go (90%) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/validation/validation.go (79%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/zz_generated.deepcopy.go (77%) create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/error.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go diff --git a/vendor.conf b/vendor.conf index e534c48e..e7529a8c 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,11 +1,11 @@ -k8s.io/kubernetes v1.9.0-alpha.2 https://github.com/kubernetes/kubernetes -k8s.io/client-go master https://github.com/kubernetes/client-go -k8s.io/apimachinery master https://github.com/kubernetes/apimachinery -k8s.io/apiserver master https://github.com/kubernetes/apiserver +k8s.io/kubernetes release-1.9 https://github.com/kubernetes/kubernetes +k8s.io/client-go release-6.0 https://github.com/kubernetes/client-go +k8s.io/apimachinery release-1.9 https://github.com/kubernetes/apimachinery +k8s.io/apiserver release-1.9 https://github.com/kubernetes/apiserver k8s.io/utils 4fe312863be2155a7b68acd2aff1c9221b24e68c https://github.com/kubernetes/utils -k8s.io/api master https://github.com/kubernetes/api -k8s.io/kube-openapi abfc5fbe1cf87ee697db107fdfd24c32fe4397a8 https://github.com/kubernetes/kube-openapi -k8s.io/apiextensions-apiserver master https://github.com/kubernetes/apiextensions-apiserver +k8s.io/api release-1.9 https://github.com/kubernetes/api +k8s.io/kube-openapi 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 https://github.com/kubernetes/kube-openapi +k8s.io/apiextensions-apiserver release-1.9 https://github.com/kubernetes/apiextensions-apiserver # github.com/googleapis/gnostic 0c5108395e2debce0d731cf0287ddf7242066aba github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6 diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index 49e0e098..8a5d1fbb 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // Package v1alpha1 is the v1alpha1 version of the API. 
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and ExternalAdmissionHookConfiguration is for the +// InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. // +groupName=admissionregistration.k8s.io package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 4c66de14..bdc26637 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -25,16 +25,10 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto It has these top-level messages: - AdmissionHookClientConfig - ExternalAdmissionHook - ExternalAdmissionHookConfiguration - ExternalAdmissionHookConfigurationList Initializer InitializerConfiguration InitializerConfigurationList Rule - RuleWithOperations - ServiceReference */ package v1alpha1 @@ -58,230 +52,32 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AdmissionHookClientConfig) Reset() { *m = AdmissionHookClientConfig{} } -func (*AdmissionHookClientConfig) ProtoMessage() {} -func (*AdmissionHookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *ExternalAdmissionHook) Reset() { *m = ExternalAdmissionHook{} } -func (*ExternalAdmissionHook) ProtoMessage() {} -func (*ExternalAdmissionHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ExternalAdmissionHookConfiguration) Reset() { *m = ExternalAdmissionHookConfiguration{} } -func (*ExternalAdmissionHookConfiguration) ProtoMessage() {} -func (*ExternalAdmissionHookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *ExternalAdmissionHookConfigurationList) Reset() { - *m = ExternalAdmissionHookConfigurationList{} -} -func (*ExternalAdmissionHookConfigurationList) ProtoMessage() {} -func (*ExternalAdmissionHookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - func (m *Initializer) Reset() { *m = Initializer{} } func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } func (*InitializerConfiguration) ProtoMessage() {} func (*InitializerConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{1} } func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } func (*InitializerConfigurationList) ProtoMessage() {} func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{2} } func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func 
(*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func init() { - proto.RegisterType((*AdmissionHookClientConfig)(nil), "k8s.io.api.admissionregistration.v1alpha1.AdmissionHookClientConfig") - proto.RegisterType((*ExternalAdmissionHook)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHook") - proto.RegisterType((*ExternalAdmissionHookConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfiguration") - proto.RegisterType((*ExternalAdmissionHookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList") proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") - proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.RuleWithOperations") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1alpha1.ServiceReference") } -func (m *AdmissionHookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionHookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n1, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.CABundle != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URLPath))) - i += copy(dAtA[i:], m.URLPath) - return i, nil -} - -func (m *ExternalAdmissionHook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHook) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n2, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - return i, nil -} - -func (m *ExternalAdmissionHookConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.ExternalAdmissionHooks) > 0 { - for _, msg := range m.ExternalAdmissionHooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ExternalAdmissionHookConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *Initializer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -334,11 +130,11 @@ func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n1 if len(m.Initializers) > 0 { for _, msg := range m.Initializers { dAtA[i] = 0x12 @@ -372,11 +168,11 @@ func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n2 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -455,73 +251,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n7, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) @@ -549,68 +278,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *AdmissionHookClientConfig) Size() (n int) { - var l int - _ = l - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.URLPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExternalAdmissionHook) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExternalAdmissionHookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalAdmissionHooks) > 0 { - for _, e := range m.ExternalAdmissionHooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ExternalAdmissionHookConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *Initializer) Size() (n int) { var l int _ = l @@ -677,30 +344,6 @@ func (m *Rule) Size() (n int) { return n } -func (m *RuleWithOperations) Size() (n int) { - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceReference) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -714,53 +357,6 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AdmissionHookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionHookClientConfig{`, - `Service:` + strings.Replace(strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1), `&`, ``, 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URLPath:` + fmt.Sprintf("%v", this.URLPath) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "AdmissionHookClientConfig", "AdmissionHookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - 
`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHookConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `ExternalAdmissionHooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExternalAdmissionHooks), "ExternalAdmissionHook", "ExternalAdmissionHook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHookConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ExternalAdmissionHookConfiguration", "ExternalAdmissionHookConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Initializer) String() string { if this == nil { return "nil" @@ -806,28 +402,6 @@ func (this *Rule) String() string { }, "") return s } -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -836,538 +410,6 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *AdmissionHookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionHookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionHookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) - if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URLPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URLPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalAdmissionHooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalAdmissionHooks = append(m.ExternalAdmissionHooks, ExternalAdmissionHook{}) - if err := m.ExternalAdmissionHooks[len(m.ExternalAdmissionHooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ExternalAdmissionHookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Initializer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1837,223 +879,6 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 
{ - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b 
:= dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -2164,61 +989,40 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 893 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8b, 0x23, 0x45, - 0x14, 0x4e, 0x65, 0x32, 0x4c, 0x52, 0x49, 0xd8, 0xdd, 0x42, 0x97, 0x38, 0x48, 0x27, 0xf4, 0x61, - 0xc9, 0x22, 0x76, 0x3b, 0xa3, 0x2c, 0x82, 0x88, 0x4e, 0x8f, 0xbf, 0x06, 0x66, 0x77, 0xc7, 0x72, - 0x55, 0x10, 0x0f, 0x56, 0x3a, 0x2f, 0x49, 0x99, 0xfe, 0x45, 0x55, 0x75, 0x70, 0x3c, 0x88, 0x17, - 0xef, 0x82, 0x17, 0xaf, 0xde, 0xfc, 0x53, 0xe6, 0xb8, 0xc7, 0x39, 0x05, 0xa7, 0x05, 0x2f, 0x82, - 0x7f, 0xc0, 0x9c, 0xa4, 0x7f, 0xa5, 0x3b, 0x9b, 0x84, 0x9d, 0x28, 0xec, 0x2d, 0xf5, 0xbd, 0xfa, - 0xde, 0xfb, 0xde, 0x97, 0xf7, 0xaa, 0x31, 0x9d, 0xbe, 0x2d, 0x0d, 0xee, 0x9b, 0xd3, 0x70, 0x00, - 0xc2, 0x03, 0x05, 0xd2, 0x9c, 0x81, 0x37, 0xf4, 0x85, 0x99, 0x05, 0x58, 0xc0, 0x4d, 0x36, 0x74, - 0xb9, 0x94, 0xdc, 0xf7, 0x04, 0x8c, 0xb9, 0x54, 0x82, 0x29, 0xee, 0x7b, 0xe6, 0xec, 0x80, 0x39, - 0xc1, 0x84, 0x1d, 0x98, 0x63, 0xf0, 0x40, 0x30, 0x05, 0x43, 0x23, 0x10, 0xbe, 0xf2, 0xc9, 0xfd, - 0x94, 0x6a, 0xb0, 0x80, 0x1b, 0x6b, 0xa9, 0x46, 0x4e, 0xdd, 0x7f, 0x7d, 0xcc, 0xd5, 0x24, 0x1c, - 0x18, 0xb6, 0xef, 0x9a, 0x63, 0x7f, 0xec, 0x9b, 0x49, 0x86, 0x41, 0x38, 0x4a, 0x4e, 0xc9, 0x21, - 0xf9, 0x95, 0x66, 0xde, 0x7f, 0xab, 0x10, 0xe5, 0x32, 0x7b, 0xc2, 0x3d, 0x10, 0xe7, 0x66, 0x30, - 0x1d, 0xc7, 0x80, 0x34, 0x5d, 0x50, 0xcc, 0x9c, 0xad, 0xe8, 0xd9, 0x37, 0x37, 0xb1, 0x44, 0xe8, - 0x29, 0xee, 0xc2, 0x0a, 0xe1, 0xc1, 0xf3, 0x08, 0xd2, 0x9e, 0x80, 0xcb, 0x56, 0x78, 0x6f, 0x6e, - 0xe2, 0x85, 0x8a, 0x3b, 0x26, 0xf7, 0x94, 0x54, 0xe2, 0x59, 0x92, 0x7e, 0x89, 0xf0, 0x2b, 0x47, - 0xb9, 0x4b, 0x9f, 0xf8, 0xfe, 0xf4, 0xd8, 0xe1, 0xe0, 0xa9, 0x63, 0xdf, 0x1b, 0xf1, 0x31, 0x19, - 0xe1, 0x3d, 0x09, 0x62, 0xc6, 0x6d, 0xe8, 0xa0, 0x1e, 0xea, 0x37, 0x0f, 0xdf, 0x31, 0x6e, 0xec, - 0xae, 0xf1, 0x59, 0xca, 0xa4, 0x30, 0x02, 0x01, 0x9e, 0x0d, 0xd6, 0xad, 0x8b, 0x79, 0xb7, 0x12, - 0xcd, 0xbb, 0x7b, 0x79, 0x24, 0x4f, 0x4e, 0xfa, 0xb8, 0x6e, 0x33, 0x2b, 0xf4, 0x86, 0x0e, 0x74, - 0xaa, 0x3d, 0xd4, 0x6f, 0x59, 0xad, 0x68, 0xde, 0xad, 0x1f, 0x1f, 0xa5, 0x18, 0x5d, 0x44, 0xc9, - 0x7d, 0xbc, 0x17, 0x0a, 0xe7, 0x8c, 0xa9, 0x49, 0x67, 0xa7, 0x87, 0xfa, 0x8d, 0x22, 0xe9, 0xe7, - 0xf4, 0x34, 0x86, 0x69, 0x1e, 0xd7, 0xff, 0xae, 0xe2, 0x97, 0x3f, 0xfc, 0x4e, 0x81, 0xf0, 0x98, - 0xb3, 0xd4, 0x22, 0xe9, 0xe1, 0x9a, 0xc7, 0xdc, 0xb4, 0xa7, 0x86, 0xd5, 0xca, 0x32, 0xd4, 0x1e, - 0x31, 0x17, 0x68, 0x12, 0x21, 0x3f, 0xe0, 0x96, 0x5d, 0x32, 0x22, 0x11, 0xd5, 0x3c, 0xfc, 0x60, - 0x8b, 0xee, 0x37, 0x9a, 0x6a, 0xbd, 0x94, 0xd5, 0x6b, 0x95, 0x51, 0xba, 0x54, 0x8f, 0x0c, 0xf0, - 0xae, 0x08, 0x1d, 0x90, 0x9d, 0x9d, 0xde, 0x4e, 0xbf, 0x79, 0xf8, 0xee, 0x16, 0x85, 0x69, 0xe8, - 0xc0, 0x97, 0x5c, 0x4d, 0x1e, 
0x07, 0x90, 0x86, 0xa4, 0xd5, 0xce, 0x2a, 0xee, 0xc6, 0x31, 0x49, - 0xd3, 0xd4, 0xe4, 0x14, 0xb7, 0x47, 0x8c, 0x3b, 0xa1, 0x80, 0x33, 0xdf, 0xe1, 0xf6, 0x79, 0xa7, - 0x96, 0xd8, 0x71, 0x2f, 0x9a, 0x77, 0xdb, 0x1f, 0x95, 0x03, 0xd7, 0xf3, 0xee, 0x9d, 0x25, 0xe0, - 0xc9, 0x79, 0x00, 0x74, 0x99, 0xac, 0xff, 0x56, 0xc5, 0xfa, 0x5a, 0xb7, 0xd3, 0x8e, 0xc2, 0x54, - 0x0b, 0xf9, 0x06, 0xd7, 0xe3, 0x45, 0x19, 0x32, 0xc5, 0xb2, 0x91, 0x7a, 0xa3, 0xd4, 0xdb, 0x62, - 0x6e, 0x8d, 0x60, 0x3a, 0x8e, 0x01, 0x69, 0xc4, 0xb7, 0x8d, 0xd9, 0x81, 0xf1, 0x78, 0xf0, 0x2d, - 0xd8, 0xea, 0x21, 0x28, 0x66, 0x91, 0xac, 0x1d, 0x5c, 0x60, 0x74, 0x91, 0x95, 0xfc, 0x8a, 0xf0, - 0x5d, 0x58, 0x27, 0x44, 0x76, 0xaa, 0x89, 0x99, 0xef, 0x6f, 0x61, 0xe6, 0xda, 0x8e, 0x2c, 0x2d, - 0x13, 0x70, 0x77, 0x6d, 0x58, 0xd2, 0x0d, 0xf5, 0xf5, 0x6b, 0x84, 0xef, 0x3d, 0xdf, 0xa3, 0x53, - 0x2e, 0x15, 0xf9, 0x7a, 0xc5, 0x27, 0xe3, 0x66, 0x3e, 0xc5, 0xec, 0xc4, 0xa5, 0xdb, 0x99, 0xc8, - 0x7a, 0x8e, 0x94, 0x3c, 0x12, 0x78, 0x97, 0x2b, 0x70, 0x73, 0x47, 0x1e, 0xfe, 0x5f, 0x47, 0x96, - 0xf4, 0x17, 0xe3, 0x76, 0x12, 0xd7, 0xa0, 0x69, 0x29, 0xfd, 0x27, 0x84, 0x9b, 0x27, 0x1e, 0x57, - 0x9c, 0x39, 0xfc, 0x7b, 0x10, 0x37, 0x58, 0xc2, 0x27, 0xf9, 0x12, 0xa4, 0x2a, 0xcd, 0x2d, 0x97, - 0x60, 0xfd, 0xd8, 0xeb, 0xff, 0x20, 0xdc, 0x29, 0xe9, 0x78, 0xd1, 0xe3, 0x19, 0xe0, 0x16, 0x2f, - 0xaa, 0xe7, 0xbd, 0x3d, 0xd8, 0xa2, 0xb7, 0x92, 0xf8, 0xe2, 0x2d, 0x29, 0x81, 0x92, 0x2e, 0x55, - 0xd0, 0xff, 0x42, 0xf8, 0xd5, 0x4d, 0x0d, 0xbf, 0x80, 0x59, 0x9b, 0x2c, 0xcf, 0xda, 0xf1, 0x7f, - 0xeb, 0xf4, 0x26, 0x13, 0xf6, 0x0b, 0xc2, 0xb5, 0xf8, 0xaf, 0x26, 0xaf, 0xe1, 0x06, 0x0b, 0xf8, - 0xc7, 0xc2, 0x0f, 0x03, 0xd9, 0x41, 0xbd, 0x9d, 0x7e, 0xc3, 0x6a, 0x47, 0xf3, 0x6e, 0xe3, 0xe8, - 0xec, 0x24, 0x05, 0x69, 0x11, 0x27, 0x07, 0xb8, 0xc9, 0x02, 0xfe, 0x05, 0x88, 0x58, 0x47, 0xaa, - 0xb2, 0x61, 0xdd, 0x8a, 0xe6, 0xdd, 0xe6, 0xd1, 0xd9, 0x49, 0x0e, 0xd3, 0xf2, 0x9d, 0x38, 0xbf, - 0x00, 0xe9, 0x87, 0xc2, 0xce, 0x5e, 0xe8, 0x2c, 0x3f, 0xcd, 0x41, 0x5a, 0xc4, 0xf5, 0xdf, 0x11, - 0x26, 0xab, 0x6f, 0x32, 0x79, 0x0f, 0x63, 0x7f, 0x71, 0xca, 0x44, 0x76, 0x93, 0xa9, 0x59, 0xa0, - 0xd7, 0xf3, 0x6e, 0x7b, 0x71, 0x4a, 0xde, 0xdc, 0x12, 0x85, 0x7c, 0x8a, 0x6b, 0xf1, 0x40, 0x67, - 0x9f, 0xa6, 0xad, 0x97, 0x63, 0xb1, 0x70, 0xf1, 0x89, 0x26, 0xa9, 0x74, 0xc0, 0xb7, 0x9f, 0xfd, - 0x68, 0x13, 0x13, 0x37, 0xe2, 0x65, 0x94, 0x01, 0xb3, 0xf3, 0x5d, 0xbd, 0x93, 0x51, 0x1b, 0x8f, - 0xf2, 0x00, 0x2d, 0xee, 0x2c, 0xf6, 0xba, 0xba, 0x69, 0xaf, 0x2d, 0xe3, 0xe2, 0x4a, 0xab, 0x3c, - 0xbd, 0xd2, 0x2a, 0x97, 0x57, 0x5a, 0xe5, 0xc7, 0x48, 0x43, 0x17, 0x91, 0x86, 0x9e, 0x46, 0x1a, - 0xba, 0x8c, 0x34, 0xf4, 0x47, 0xa4, 0xa1, 0x9f, 0xff, 0xd4, 0x2a, 0x5f, 0xd5, 0x73, 0xbd, 0xff, - 0x06, 0x00, 0x00, 0xff, 0xff, 0x01, 0xf7, 0xd5, 0xa0, 0x26, 0x0a, 0x00, 0x00, + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x3f, + 0x18, 0x6f, 0xfe, 0xdb, 0x42, 0x9b, 0x76, 0xf9, 0xcb, 0xe0, 0xa1, 0x14, 0x99, 0x96, 0x9e, 0x2a, + 0x62, 0x62, 0x57, 0x59, 0xbc, 0xee, 0xec, 0x41, 0x0a, 0xbe, 0x2c, 0x41, 0x3c, 0x88, 0x07, 0xd3, + 0xf6, 0xd9, 0x69, 0x6c, 0x27, 0x19, 0x92, 0x4c, 0x41, 0x4f, 0x5e, 0xbc, 0x0b, 0x7e, 0xa9, 0x1e, + 0xf7, 0xb8, 0xa7, 0x62, 0x47, 0xf0, 0xe8, 0x67, 0x90, 0x99, 0xe9, 0xec, 0xcc, 0x5a, 0x8b, 0xab, + 0xb7, 0x3c, 0xbf, 0x27, 0xbf, 0xb7, 0x04, 0xb3, 0xf9, 0x63, 0x43, 0x84, 0xa2, 0xf3, 0x68, 0x0c, + 0x5a, 0x82, 0x05, 0x43, 0x97, 0x20, 0xa7, 0x4a, 0xd3, 0xed, 0x82, 0x87, 0x82, 0xf2, 0x69, 0x20, + 0x8c, 0x11, 0x4a, 0x6a, 
0xf0, 0x85, 0xb1, 0x9a, 0x5b, 0xa1, 0x24, 0x5d, 0x0e, 0xf9, 0x22, 0x9c, + 0xf1, 0x21, 0xf5, 0x41, 0x82, 0xe6, 0x16, 0xa6, 0x24, 0xd4, 0xca, 0x2a, 0xe7, 0x6e, 0x46, 0x25, + 0x3c, 0x14, 0xe4, 0xb7, 0x54, 0x92, 0x53, 0x3b, 0xf7, 0x7d, 0x61, 0x67, 0xd1, 0x98, 0x4c, 0x54, + 0x40, 0x7d, 0xe5, 0x2b, 0x9a, 0x2a, 0x8c, 0xa3, 0xf3, 0x74, 0x4a, 0x87, 0xf4, 0x94, 0x29, 0x77, + 0x1e, 0x15, 0xa1, 0x02, 0x3e, 0x99, 0x09, 0x09, 0xfa, 0x3d, 0x0d, 0xe7, 0x7e, 0x02, 0x18, 0x1a, + 0x80, 0xe5, 0x74, 0xb9, 0x93, 0xa7, 0x43, 0xf7, 0xb1, 0x74, 0x24, 0xad, 0x08, 0x60, 0x87, 0x70, + 0xfc, 0x27, 0x82, 0x99, 0xcc, 0x20, 0xe0, 0x3b, 0xbc, 0x87, 0xfb, 0x78, 0x91, 0x15, 0x0b, 0x2a, + 0xa4, 0x35, 0x56, 0xff, 0x4a, 0xea, 0x7f, 0x42, 0xb8, 0x39, 0x92, 0xc2, 0x0a, 0xbe, 0x10, 0x1f, + 0x40, 0x3b, 0x3d, 0x5c, 0x95, 0x3c, 0x80, 0x36, 0xea, 0xa1, 0x41, 0xc3, 0x6b, 0xad, 0xd6, 0xdd, + 0x4a, 0xbc, 0xee, 0x56, 0x9f, 0xf3, 0x00, 0x58, 0xba, 0x71, 0x5e, 0xe2, 0x9a, 0x8e, 0x16, 0x60, + 0xda, 0xff, 0xf5, 0x0e, 0x06, 0xcd, 0x23, 0x4a, 0x6e, 0xfc, 0xde, 0x84, 0x45, 0x0b, 0xf0, 0x0e, + 0xb7, 0x9a, 0xb5, 0x64, 0x32, 0x2c, 0x13, 0xeb, 0xff, 0x40, 0xb8, 0x5d, 0xca, 0x71, 0xaa, 0xe4, + 0xb9, 0xf0, 0xa3, 0x4c, 0xc0, 0x79, 0x8b, 0xeb, 0xc9, 0xeb, 0x4e, 0xb9, 0xe5, 0x69, 0xb0, 0xe6, + 0xd1, 0x83, 0x92, 0xeb, 0x55, 0x59, 0x12, 0xce, 0xfd, 0x04, 0x30, 0x24, 0xb9, 0x4d, 0x96, 0x43, + 0xf2, 0x62, 0xfc, 0x0e, 0x26, 0xf6, 0x19, 0x58, 0xee, 0x39, 0x5b, 0x5b, 0x5c, 0x60, 0xec, 0x4a, + 0xd5, 0x09, 0x71, 0x4b, 0x14, 0xee, 0x79, 0xb7, 0xe3, 0xbf, 0xe8, 0x56, 0x0a, 0xef, 0xdd, 0xde, + 0x7a, 0xb5, 0x4a, 0xa0, 0x61, 0xd7, 0x1c, 0xfa, 0xdf, 0x11, 0xbe, 0xb3, 0xaf, 0xf0, 0x53, 0x61, + 0xac, 0xf3, 0x66, 0xa7, 0x34, 0xb9, 0x59, 0xe9, 0x84, 0x9d, 0x56, 0xbe, 0xb5, 0x8d, 0x51, 0xcf, + 0x91, 0x52, 0xe1, 0x19, 0xae, 0x09, 0x0b, 0x41, 0xde, 0xf4, 0xf4, 0xdf, 0x9a, 0x5e, 0x4b, 0x5d, + 0xfc, 0xec, 0x28, 0x51, 0x66, 0x99, 0x41, 0xff, 0x0b, 0xc2, 0xd5, 0xe4, 0xab, 0x9d, 0x7b, 0xb8, + 0xc1, 0x43, 0xf1, 0x44, 0xab, 0x28, 0x34, 0x6d, 0xd4, 0x3b, 0x18, 0x34, 0xbc, 0xc3, 0x78, 0xdd, + 0x6d, 0x9c, 0x9c, 0x8d, 0x32, 0x90, 0x15, 0x7b, 0x67, 0x88, 0x9b, 0x3c, 0x14, 0xaf, 0x40, 0x27, + 0x39, 0xb2, 0x94, 0x0d, 0xef, 0xff, 0x78, 0xdd, 0x6d, 0x9e, 0x9c, 0x8d, 0x72, 0x98, 0x95, 0xef, + 0x24, 0xfa, 0x1a, 0x8c, 0x8a, 0xf4, 0x04, 0x4c, 0xfb, 0xa0, 0xd0, 0x67, 0x39, 0xc8, 0x8a, 0xbd, + 0x47, 0x56, 0x1b, 0xb7, 0x72, 0xb1, 0x71, 0x2b, 0x97, 0x1b, 0xb7, 0xf2, 0x31, 0x76, 0xd1, 0x2a, + 0x76, 0xd1, 0x45, 0xec, 0xa2, 0xcb, 0xd8, 0x45, 0x5f, 0x63, 0x17, 0x7d, 0xfe, 0xe6, 0x56, 0x5e, + 0xd7, 0xf3, 0xd2, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xfb, 0x23, 0x89, 0xaa, 0x04, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index b431b318..82508ca6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,72 +29,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; -// AdmissionHookClientConfig contains the information to make a TLS -// connection with the webhook -message AdmissionHookClientConfig { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. 
- // Required - optional ServiceReference service = 1; - - // URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object. - optional string urlPath = 3; - - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. - // Required - optional bytes caBundle = 2; -} - -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. -message ExternalAdmissionHook { - // The name of the external admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - optional string name = 1; - - // ClientConfig defines how to communicate with the hook. - // Required - optional AdmissionHookClientConfig clientConfig = 2; - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - repeated RuleWithOperations rules = 3; - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - optional string failurePolicy = 4; -} - -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -message ExternalAdmissionHookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated ExternalAdmissionHook externalAdmissionHooks = 2; -} - -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -message ExternalAdmissionHookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ExternalAdmissionHookConfiguration. - repeated ExternalAdmissionHookConfiguration items = 2; -} - // Initializer describes the name and the failure policy of an initializer, and // what resources it applies to. message Initializer { @@ -155,7 +89,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -163,36 +97,12 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; } -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -message RuleWithOperations { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. 
- repeated string operations = 1; - - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - optional Rule rule = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // Namespace is the namespace of the service - // Required - optional string namespace = 1; - - // Name is the name of the service - // Required - optional string name = 2; -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index e178cf74..e9a4164c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -45,8 +45,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &InitializerConfiguration{}, &InitializerConfigurationList{}, - &ExternalAdmissionHookConfiguration{}, - &ExternalAdmissionHookConfigurationList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index 84f47b62..a245f1e8 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -104,120 +104,3 @@ type Rule struct { // Required. Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` } - -type FailurePolicyType string - -const ( - // Ignore means the initializer is removed from the initializers list of an - // object if the initializer is timed out. - Ignore FailurePolicyType = "Ignore" - // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the - // extensible admission feature is beta. - Fail FailurePolicyType = "Fail" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -type ExternalAdmissionHookConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - ExternalAdmissionHooks []ExternalAdmissionHook `json:"externalAdmissionHooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=externalAdmissionHooks"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -type ExternalAdmissionHookConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of ExternalAdmissionHookConfiguration. - Items []ExternalAdmissionHookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. 
-type ExternalAdmissionHook struct { - // The name of the external admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig AdmissionHookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` -} - -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` -} - -type OperationType string - -// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. -const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" -) - -// AdmissionHookClientConfig contains the information to make a TLS -// connection with the webhook -type AdmissionHookClientConfig struct { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. - // Required - Service ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` - - // URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object. - URLPath string `json:"urlPath" protobuf:"bytes,3,opt,name=urlPath"` - - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. 
- // Required - CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // Namespace is the namespace of the service - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Name is the name of the service - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index 77a8a519..e2494e5d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -27,49 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_AdmissionHookClientConfig = map[string]string{ - "": "AdmissionHookClientConfig contains the information to make a TLS connection with the webhook", - "service": "Service is a reference to the service for this webhook. If there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error. Required", - "urlPath": "URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object.", - "caBundle": "CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. Required", -} - -func (AdmissionHookClientConfig) SwaggerDoc() map[string]string { - return map_AdmissionHookClientConfig -} - -var map_ExternalAdmissionHook = map[string]string{ - "": "ExternalAdmissionHook describes an external admission webhook and the resources and operations it applies to.", - "name": "The name of the external admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", -} - -func (ExternalAdmissionHook) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHook -} - -var map_ExternalAdmissionHookConfiguration = map[string]string{ - "": "ExternalAdmissionHookConfiguration describes the configuration of initializers.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "externalAdmissionHooks": "ExternalAdmissionHooks is a list of external admission webhooks and the affected resources and operations.", -} - -func (ExternalAdmissionHookConfiguration) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHookConfiguration -} - -var map_ExternalAdmissionHookConfigurationList = map[string]string{ - "": "ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ExternalAdmissionHookConfiguration.", -} - -func (ExternalAdmissionHookConfigurationList) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHookConfigurationList -} - var map_Initializer = map[string]string{ "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", @@ -111,23 +68,4 @@ func (Rule) SwaggerDoc() map[string]string { return map_Rule } -var map_RuleWithOperations = map[string]string{ - "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", - "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", -} - -func (RuleWithOperations) SwaggerDoc() map[string]string { - return map_RuleWithOperations -} - -var map_ServiceReference = map[string]string{ - "": "ServiceReference holds a reference to Service.legacy.k8s.io", - "namespace": "Namespace is the namespace of the service Required", - "name": "Name is the name of the service Required", -} - -func (ServiceReference) SwaggerDoc() map[string]string { - return map_ServiceReference -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 118fed75..850de37c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -21,187 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionHookClientConfig).DeepCopyInto(out.(*AdmissionHookClientConfig)) - return nil - }, InType: reflect.TypeOf(&AdmissionHookClientConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHook).DeepCopyInto(out.(*ExternalAdmissionHook)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHook{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfiguration).DeepCopyInto(out.(*ExternalAdmissionHookConfiguration)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfigurationList).DeepCopyInto(out.(*ExternalAdmissionHookConfigurationList)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfiguration).DeepCopyInto(out.(*InitializerConfiguration)) - return nil - }, InType: reflect.TypeOf(&InitializerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfigurationList).DeepCopyInto(out.(*InitializerConfigurationList)) - return nil - }, InType: reflect.TypeOf(&InitializerConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Rule).DeepCopyInto(out.(*Rule)) - return nil - }, InType: reflect.TypeOf(&Rule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RuleWithOperations).DeepCopyInto(out.(*RuleWithOperations)) - return nil - }, InType: reflect.TypeOf(&RuleWithOperations{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceReference).DeepCopyInto(out.(*ServiceReference)) - return nil - }, InType: reflect.TypeOf(&ServiceReference{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionHookClientConfig) DeepCopyInto(out *AdmissionHookClientConfig) { - *out = *in - out.Service = in.Service - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionHookClientConfig. -func (in *AdmissionHookClientConfig) DeepCopy() *AdmissionHookClientConfig { - if in == nil { - return nil - } - out := new(AdmissionHookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalAdmissionHook) DeepCopyInto(out *ExternalAdmissionHook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - if *in == nil { - *out = nil - } else { - *out = new(FailurePolicyType) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHook. -func (in *ExternalAdmissionHook) DeepCopy() *ExternalAdmissionHook { - if in == nil { - return nil - } - out := new(ExternalAdmissionHook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfiguration) DeepCopyInto(out *ExternalAdmissionHookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ExternalAdmissionHooks != nil { - in, out := &in.ExternalAdmissionHooks, &out.ExternalAdmissionHooks - *out = make([]ExternalAdmissionHook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfiguration. -func (in *ExternalAdmissionHookConfiguration) DeepCopy() *ExternalAdmissionHookConfiguration { - if in == nil { - return nil - } - out := new(ExternalAdmissionHookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyInto(out *ExternalAdmissionHookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ExternalAdmissionHookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfigurationList. -func (in *ExternalAdmissionHookConfigurationList) DeepCopy() *ExternalAdmissionHookConfigurationList { - if in == nil { - return nil - } - out := new(ExternalAdmissionHookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Initializer) DeepCopyInto(out *Initializer) { *out = *in @@ -323,41 +145,3 @@ func (in *Rule) DeepCopy() *Rule { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) - } - in.Rule.DeepCopyInto(&out.Rule) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { - if in == nil { - return nil - } - out := new(RuleWithOperations) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go new file mode 100644 index 00000000..afbb3d6d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1beta1 is the v1beta1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io +package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go new file mode 100644 index 00000000..392ebf99 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -0,0 +1,2150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto + + It has these top-level messages: + MutatingWebhookConfiguration + MutatingWebhookConfigurationList + Rule + RuleWithOperations + ServiceReference + ValidatingWebhookConfiguration + ValidatingWebhookConfigurationList + Webhook + WebhookClientConfig +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func init() { + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), 
"k8s.io.api.admissionregistration.v1beta1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") +} +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) + n3, err := m.Rule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FailurePolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i += copy(dAtA[i:], *m.FailurePolicy) + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.CABundle != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + if m.URL != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *MutatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if 
m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Webhook) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + 
fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 916 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xd6, 0x8e, 0x6c, 0x8f, 0x6d, 0xd1, 0x0c, 0x20, 0x99, 0xa8, 0xda, 0xb5, 0x7c, 0x40, + 0x96, 0x50, 0x76, 0x71, 0x8a, 0x10, 0x42, 0x20, 0x94, 0x8d, 0x54, 0x88, 0x94, 0xb4, 0x66, 0x02, + 0xad, 0x84, 0x38, 0x30, 0x5e, 0xbf, 0xd8, 0x83, 0xf7, 0x97, 0x66, 0x66, 0xdd, 0xe6, 0x86, 0xc4, + 0x3f, 0x80, 0xc4, 0x1f, 0xc1, 0x5f, 0xc1, 0x3d, 0x37, 0x7a, 0x41, 0xf4, 0xb4, 0x22, 0xcb, 0x99, + 0x03, 0xd7, 0x9e, 0xd0, 0xce, 0xae, 0xbd, 0x76, 0x1c, 0xa7, 0xee, 0x85, 0x03, 0x37, 0xcf, 0xf7, + 0xde, 0xf7, 0xbd, 0xf7, 0x3d, 0xbf, 0xb7, 0xe8, 0xcb, 0xe9, 0x47, 0xc2, 0x64, 0x81, 0x35, 0x8d, + 0x86, 0xc0, 0x7d, 0x90, 0x20, 0xac, 0x19, 0xf8, 0xa3, 0x80, 0x5b, 0x79, 0x80, 0x86, 0xcc, 0xa2, + 0x23, 0x8f, 0x09, 0xc1, 0x02, 0x9f, 0xc3, 0x98, 0x09, 0xc9, 0xa9, 0x64, 0x81, 0x6f, 0xcd, 0xfa, + 0x43, 0x90, 0xb4, 0x6f, 0x8d, 0xc1, 0x07, 0x4e, 0x25, 0x8c, 0xcc, 0x90, 0x07, 0x32, 0xc0, 0xbd, + 0x8c, 0x69, 0xd2, 0x90, 0x99, 0x37, 0x32, 0xcd, 0x9c, 0xb9, 0xb7, 0x3f, 0x66, 0x72, 0x12, 0x0d, + 0x4d, 0x27, 0xf0, 0xac, 0x71, 0x30, 0x0e, 0x2c, 0x25, 0x30, 0x8c, 0xce, 0xd5, 0x4b, 0x3d, 0xd4, + 0xaf, 0x4c, 0x78, 0xaf, 0xbb, 0xd4, 0x92, 0x13, 0x70, 0xb0, 0x66, 0x6b, 0xc5, 0xf7, 0x4e, 0x8b, + 0x1c, 0x78, 0x26, 0xc1, 0x4f, 0x6b, 0x8b, 0x7d, 0x1a, 0x32, 0x01, 0x7c, 0x06, 0xdc, 0x0a, 0xa7, + 0xe3, 0x34, 0x26, 0x56, 0x13, 0x36, 0x79, 0xd9, 0xfb, 0xa0, 0x90, 0xf3, 0xa8, 0x33, 0x61, 0x3e, + 0xf0, 0x8b, 0x42, 0xc3, 0x03, 0x49, 0x6f, 0x6a, 0xc2, 0xda, 0xc4, 0xe2, 0x91, 0x2f, 0x99, 0x07, + 0x6b, 0x84, 0x0f, 0x5f, 0x45, 0x10, 0xce, 0x04, 0x3c, 0xba, 0xc6, 0xbb, 0xbf, 0x89, 0x17, 0x49, + 0xe6, 0x5a, 0xcc, 0x97, 0x42, 0xf2, 0xeb, 0xa4, 0xee, 0xef, 0x1a, 0xba, 0x77, 0x1a, 0x49, 0x2a, + 0x99, 0x3f, 0x7e, 0x02, 0xc3, 0x49, 0x10, 0x4c, 0x8f, 0x02, 0xff, 0x9c, 0x8d, 0xa3, 0xec, 0xef, + 0xc1, 0xdf, 0xa1, 0x5a, 0xea, 0x6c, 0x44, 0x25, 0x6d, 0x6b, 0x1d, 0xad, 0xd7, 0x38, 0x78, 0xdf, + 0x2c, 0xfe, 0xd3, 0x45, 0x21, 0x33, 0x9c, 0x8e, 0x53, 0x40, 0x98, 0x69, 0xb6, 0x39, 0xeb, 0x9b, + 0x8f, 0x86, 0xdf, 0x83, 0x23, 0x4f, 0x41, 0x52, 0x1b, 0x5f, 0xc6, 0x46, 0x29, 0x89, 0x0d, 0x54, + 
0x60, 0x64, 0xa1, 0x8a, 0xcf, 0x50, 0x2d, 0xaf, 0x2c, 0xda, 0x77, 0x3a, 0xe5, 0x5e, 0xe3, 0xa0, + 0x6f, 0x6e, 0xbb, 0x35, 0x66, 0xce, 0xb4, 0x2b, 0x69, 0x09, 0x52, 0x7b, 0x9a, 0x0b, 0x75, 0xff, + 0xd6, 0x50, 0xe7, 0x36, 0x5f, 0x27, 0x4c, 0x48, 0xfc, 0xed, 0x9a, 0x37, 0x73, 0x3b, 0x6f, 0x29, + 0x5b, 0x39, 0xbb, 0x9b, 0x3b, 0xab, 0xcd, 0x91, 0x25, 0x5f, 0x53, 0xb4, 0xc3, 0x24, 0x78, 0x73, + 0x53, 0x0f, 0xb6, 0x37, 0x75, 0x5b, 0xe3, 0x76, 0x2b, 0x2f, 0xb9, 0x73, 0x9c, 0x8a, 0x93, 0xac, + 0x46, 0xf7, 0x67, 0x0d, 0x55, 0x48, 0xe4, 0x02, 0x7e, 0x0f, 0xd5, 0x69, 0xc8, 0x3e, 0xe7, 0x41, + 0x14, 0x8a, 0xb6, 0xd6, 0x29, 0xf7, 0xea, 0x76, 0x2b, 0x89, 0x8d, 0xfa, 0xe1, 0xe0, 0x38, 0x03, + 0x49, 0x11, 0xc7, 0x7d, 0xd4, 0xa0, 0x21, 0x7b, 0x0c, 0x5c, 0x2d, 0xbe, 0x6a, 0xb4, 0x6e, 0xbf, + 0x91, 0xc4, 0x46, 0xe3, 0x70, 0x70, 0x3c, 0x87, 0xc9, 0x72, 0x4e, 0xaa, 0xcf, 0x41, 0x04, 0x11, + 0x77, 0x40, 0xb4, 0xcb, 0x85, 0x3e, 0x99, 0x83, 0xa4, 0x88, 0x77, 0x7f, 0xd1, 0x10, 0x4e, 0xbb, + 0x7a, 0xc2, 0xe4, 0xe4, 0x51, 0x08, 0x99, 0x03, 0x81, 0x3f, 0x43, 0x28, 0x58, 0xbc, 0xf2, 0x26, + 0x0d, 0xb5, 0x1f, 0x0b, 0xf4, 0x65, 0x6c, 0xb4, 0x16, 0xaf, 0xaf, 0x2e, 0x42, 0x20, 0x4b, 0x14, + 0x3c, 0x40, 0x15, 0x1e, 0xb9, 0xd0, 0xbe, 0xb3, 0xf6, 0xa7, 0xbd, 0x62, 0xb2, 0x69, 0x33, 0x76, + 0x33, 0x9f, 0xa0, 0x1a, 0x18, 0x51, 0x4a, 0xdd, 0x1f, 0x35, 0x74, 0xf7, 0x0c, 0xf8, 0x8c, 0x39, + 0x40, 0xe0, 0x1c, 0x38, 0xf8, 0x0e, 0x60, 0x0b, 0xd5, 0x7d, 0xea, 0x81, 0x08, 0xa9, 0x03, 0x6a, + 0x41, 0xea, 0xf6, 0x6e, 0xce, 0xad, 0x3f, 0x9c, 0x07, 0x48, 0x91, 0x83, 0x3b, 0xa8, 0x92, 0x3e, + 0x54, 0x5f, 0xf5, 0xa2, 0x4e, 0x9a, 0x4b, 0x54, 0x04, 0xdf, 0x43, 0x95, 0x90, 0xca, 0x49, 0xbb, + 0xac, 0x32, 0x6a, 0x69, 0x74, 0x40, 0xe5, 0x84, 0x28, 0xb4, 0xfb, 0x87, 0x86, 0xf4, 0xc7, 0xd4, + 0x65, 0xa3, 0xff, 0xdd, 0x3d, 0xfe, 0xa3, 0xa1, 0xee, 0xed, 0xce, 0xfe, 0x83, 0x8b, 0xf4, 0x56, + 0x2f, 0xf2, 0x8b, 0xed, 0x6d, 0xdd, 0xde, 0xfa, 0x86, 0x9b, 0xfc, 0xad, 0x8c, 0xaa, 0x79, 0xfa, + 0x62, 0x33, 0xb4, 0x8d, 0x9b, 0xf1, 0x14, 0x35, 0x1d, 0x97, 0x81, 0x2f, 0x33, 0xe9, 0x7c, 0xb7, + 0x3f, 0x7d, 0xed, 0xd1, 0x1f, 0x2d, 0x89, 0xd8, 0x6f, 0xe5, 0x85, 0x9a, 0xcb, 0x28, 0x59, 0x29, + 0x84, 0x29, 0xda, 0x49, 0x4f, 0x20, 0xbb, 0xe6, 0xc6, 0xc1, 0x27, 0xaf, 0x77, 0x4d, 0xab, 0xa7, + 0x5d, 0x4c, 0x22, 0x8d, 0x09, 0x92, 0x29, 0xe3, 0x13, 0xd4, 0x3a, 0xa7, 0xcc, 0x8d, 0x38, 0x0c, + 0x02, 0x97, 0x39, 0x17, 0xed, 0x8a, 0x1a, 0xc3, 0xbb, 0x49, 0x6c, 0xb4, 0x1e, 0x2c, 0x07, 0x5e, + 0xc6, 0xc6, 0xee, 0x0a, 0xa0, 0x4e, 0x7f, 0x95, 0x8c, 0x9f, 0xa1, 0xdd, 0xc5, 0xc9, 0x9d, 0x81, + 0x0b, 0x8e, 0x0c, 0x78, 0x7b, 0x47, 0x8d, 0xeb, 0xfe, 0x96, 0xdb, 0x42, 0x87, 0xe0, 0xce, 0xa9, + 0xf6, 0xdb, 0x49, 0x6c, 0xec, 0x3e, 0xbc, 0xae, 0x48, 0xd6, 0x8b, 0x74, 0x7f, 0xd5, 0xd0, 0x9b, + 0x37, 0x8c, 0x19, 0x53, 0x54, 0x15, 0xd9, 0xc7, 0x23, 0xdf, 0xda, 0x8f, 0xb7, 0x1f, 0xe2, 0xf5, + 0xaf, 0x8e, 0xdd, 0x48, 0x62, 0xa3, 0x3a, 0x47, 0xe7, 0xba, 0xb8, 0x87, 0x6a, 0x0e, 0xb5, 0x23, + 0x7f, 0x94, 0x7f, 0xf6, 0x9a, 0x76, 0x33, 0xdd, 0xf2, 0xa3, 0xc3, 0x0c, 0x23, 0x8b, 0x28, 0x7e, + 0x07, 0x95, 0x23, 0xee, 0xe6, 0x5f, 0x98, 0x6a, 0x12, 0x1b, 0xe5, 0xaf, 0xc9, 0x09, 0x49, 0x31, + 0x7b, 0xff, 0xf2, 0x4a, 0x2f, 0x3d, 0xbf, 0xd2, 0x4b, 0x2f, 0xae, 0xf4, 0xd2, 0x0f, 0x89, 0xae, + 0x5d, 0x26, 0xba, 0xf6, 0x3c, 0xd1, 0xb5, 0x17, 0x89, 0xae, 0xfd, 0x99, 0xe8, 0xda, 0x4f, 0x7f, + 0xe9, 0xa5, 0x6f, 0xaa, 0x79, 0x6b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x78, 0x57, 0x76, 0x28, + 0x10, 0x0a, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto 
b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto new file mode 100644 index 00000000..513a3d16 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -0,0 +1,261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. 
+ repeated string resources = 3; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +message Webhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go new file mode 100644 index 00000000..d126da9f --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go new file mode 100644 index 00000000..30d2750c --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -0,0 +1,281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. 
+ // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} + +type FailurePolicyType string + +const ( + // Ignore means the initializer is removed from the initializers list of an + // object if the initializer is timed out. + Ignore FailurePolicyType = "Ignore" + // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the + // extensible admission feature is beta. + Fail FailurePolicyType = "Fail" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. 
+ Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
+const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..ea8c1e37 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. 
Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_Webhook = map[string]string{ + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is other cluster scoped resource, it is not subjected to the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nIf there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..fbd9ad32 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,321 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. 
+func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + if *in == nil { + *out = nil + } else { + *out = new(FailurePolicyType) + **out = **in + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + if *in == nil { + *out = nil + } else { + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 62eb80ce..1d66c222 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index f32e878f..38e7415b 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -25,12 +25,34 @@ limitations under the License. 
k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto It has these top-level messages: + ControllerRevision + ControllerRevisionList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy */ package v1 @@ -38,6 +60,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -58,38 +82,229 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func 
(*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{21} +} + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } 
+func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{27} +} func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus") proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 
0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -108,27 +323,69 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n1 + i += n4 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) + n5, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n5 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) + n6, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n6 + return i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } @@ -150,11 +407,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -189,28 +446,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n5, err := m.Selector.MarshalTo(dAtA[i:]) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 
+ i += n9 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n6, err := m.Template.MarshalTo(dAtA[i:]) + n10, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n10 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n7, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n11 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -266,6 +523,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -292,11 +561,507 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n8, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n12 + } + return i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + 
_ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n28 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -320,11 +1085,358 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n30 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + } + return i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := 
m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n37, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n38, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n39, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if 
m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 } return i, nil } @@ -356,6 +1468,31 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *DaemonSet) Size() (n int) { var l int _ = l @@ -368,6 +1505,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -414,6 +1567,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + 
sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -429,6 +1588,183 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { return n } +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *RollingUpdateDaemonSet) Size() (n int) { var l int _ = l @@ -439,6 +1775,137 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { return n } +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) 
+ l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func sovGenerated(x uint64) (n int) { for { n++ @@ -452,6 +1919,29 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSet) String() string { if this == nil { return "nil" @@ -464,6 +1954,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -503,6 +2007,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -518,6 +2023,154 @@ func (this *DaemonSetUpdateStrategy) String() string { }, "") return s } +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *RollingUpdateDaemonSet) String() string { if this == nil { return "nil" @@ -528,6 +2181,110 @@ func (this *RollingUpdateDaemonSet) String() string { }, "") return s } +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -536,6 +2293,246 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -676,6 +2673,202 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1170,6 +3363,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1303,6 +3527,1827 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } return nil } +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1386,6 +5431,1277 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA 
[]byte) error { } return nil } +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", 
wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + 
} + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx 
+ var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -1496,63 +6812,134 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 928 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xcf, 0x73, 0xdb, 0x44, - 0x14, 0xc7, 0xad, 0x38, 0x0e, 0xce, 0xa6, 0x71, 0xc8, 0xd2, 0x49, 0x45, 0x18, 0xe4, 0x60, 0x2e, - 0x86, 0x0e, 0x12, 0x6e, 0x81, 0x61, 0xe0, 0xc0, 0x54, 0xe9, 0x4c, 0x29, 0x24, 0x0e, 0xac, 0x1b, - 0x0e, 0x0c, 0xcc, 0xb0, 0x96, 0x1e, 0xce, 0x62, 0xfd, 0x1a, 0xed, 0xca, 0x83, 0x6f, 0x9c, 0x38, - 0xf3, 0xa7, 0xc0, 0x5f, 0xd0, 0x6b, 0x8e, 0x3d, 0xf6, 0xe4, 0x21, 0xe6, 0xbf, 0xe0, 0x02, 0xb3, - 0xab, 0x8d, 0x6d, 0xd9, 0x72, 0x9a, 0x9b, 0xf7, 0xbd, 0xef, 0xf7, 0xb3, 0x6f, 0xdf, 0x3e, 0xaf, - 0xd0, 0xe7, 0xc3, 0x4f, 0xb9, 0xcd, 0x62, 0x67, 0x98, 0xf5, 0x21, 0x8d, 0x40, 0x00, 0x77, 0x46, - 0x10, 0xf9, 0x71, 0xea, 0xe8, 0x04, 0x4d, 0x98, 0x43, 0x93, 0x84, 0x3b, 0xa3, 0x8e, 0x33, 0x80, - 0x08, 0x52, 0x2a, 0xc0, 0xb7, 0x93, 0x34, 0x16, 0x31, 0xc6, 0xb9, 0xc6, 0xa6, 0x09, 0xb3, 0xa5, - 0xc6, 0x1e, 0x75, 0x0e, 0x3f, 0x18, 0x30, 0x71, 0x91, 0xf5, 0x6d, 0x2f, 0x0e, 0x9d, 0x41, 0x3c, - 0x88, 0x1d, 0x25, 0xed, 0x67, 0x3f, 0xab, 0x95, 0x5a, 0xa8, 0x5f, 0x39, 0xe2, 
0xb0, 0xb5, 0xb0, - 0x8d, 0x17, 0xa7, 0x50, 0xb2, 0xcd, 0xe1, 0x7b, 0x0b, 0x9a, 0x24, 0x0e, 0x98, 0x37, 0x76, 0x46, - 0x9d, 0x3e, 0x08, 0xba, 0x2a, 0xfd, 0x68, 0x2e, 0x0d, 0xa9, 0x77, 0xc1, 0x22, 0x48, 0xc7, 0x4e, - 0x32, 0x1c, 0xc8, 0x00, 0x77, 0x42, 0x10, 0xb4, 0x6c, 0x03, 0x67, 0x9d, 0x2b, 0xcd, 0x22, 0xc1, - 0x42, 0x58, 0x31, 0x7c, 0xf2, 0x2a, 0x03, 0xf7, 0x2e, 0x20, 0xa4, 0x2b, 0xbe, 0x87, 0xeb, 0x7c, - 0x99, 0x60, 0x81, 0xc3, 0x22, 0xc1, 0x45, 0xba, 0x6c, 0x6a, 0xfd, 0x67, 0xa0, 0xed, 0xc7, 0x14, - 0xc2, 0x38, 0xea, 0x81, 0xc0, 0x3f, 0xa1, 0xba, 0x3c, 0x86, 0x4f, 0x05, 0x35, 0x8d, 0x23, 0xa3, - 0xbd, 0xf3, 0xe0, 0x43, 0x7b, 0x7e, 0x0d, 0x33, 0xaa, 0x9d, 0x0c, 0x07, 0x32, 0xc0, 0x6d, 0xa9, - 0xb6, 0x47, 0x1d, 0xfb, 0xac, 0xff, 0x0b, 0x78, 0xe2, 0x14, 0x04, 0x75, 0xf1, 0xe5, 0xa4, 0x59, - 0x99, 0x4e, 0x9a, 0x68, 0x1e, 0x23, 0x33, 0x2a, 0x3e, 0x46, 0x9b, 0x3c, 0x01, 0xcf, 0xdc, 0x50, - 0xf4, 0x77, 0xec, 0xd5, 0x4b, 0xb6, 0x67, 0xe5, 0xf4, 0x12, 0xf0, 0xdc, 0x3b, 0x1a, 0xb7, 0x29, - 0x57, 0x44, 0x99, 0xf1, 0xd7, 0x68, 0x8b, 0x0b, 0x2a, 0x32, 0x6e, 0x56, 0x15, 0xe6, 0xdd, 0x9b, - 0x31, 0x4a, 0xea, 0x36, 0x34, 0x68, 0x2b, 0x5f, 0x13, 0x8d, 0x68, 0xfd, 0x65, 0xa0, 0xdd, 0x99, - 0xf6, 0x84, 0x71, 0x81, 0x7f, 0x58, 0xe9, 0x82, 0x7d, 0xbb, 0x2e, 0x48, 0xb7, 0xea, 0xc1, 0xeb, - 0x7a, 0xaf, 0xfa, 0x75, 0x64, 0xa1, 0x03, 0x2e, 0xaa, 0x31, 0x01, 0x21, 0x37, 0x37, 0x8e, 0xaa, - 0xed, 0x9d, 0x07, 0x6f, 0xdf, 0x58, 0xbb, 0xbb, 0xab, 0x49, 0xb5, 0xa7, 0xd2, 0x43, 0x72, 0x6b, - 0xeb, 0x79, 0x75, 0xa1, 0x66, 0xd9, 0x18, 0xfc, 0x23, 0xaa, 0x73, 0x08, 0xc0, 0x13, 0x71, 0xaa, - 0x6b, 0x7e, 0x78, 0xcb, 0x9a, 0x69, 0x1f, 0x82, 0x9e, 0xb6, 0xba, 0x77, 0x64, 0xd1, 0xd7, 0x2b, - 0x32, 0x43, 0xe2, 0x6f, 0x51, 0x5d, 0x40, 0x98, 0x04, 0x54, 0x80, 0xbe, 0xba, 0x42, 0xcf, 0xe5, - 0x9f, 0x4b, 0xc2, 0xbe, 0x89, 0xfd, 0x67, 0x5a, 0xa6, 0x2e, 0x6f, 0xd6, 0x87, 0xeb, 0x28, 0x99, - 0x61, 0xf0, 0x10, 0x35, 0xb2, 0xc4, 0x97, 0x4a, 0x21, 0x07, 0x72, 0x30, 0xd6, 0x97, 0x79, 0xff, - 0xc6, 0x86, 0x9c, 0x17, 0x2c, 0xee, 0x81, 0xde, 0xa0, 0x51, 0x8c, 0x93, 0x25, 0x34, 0x7e, 0x84, - 0xf6, 0x42, 0x16, 0x11, 0xa0, 0xfe, 0xb8, 0x07, 0x5e, 0x1c, 0xf9, 0xdc, 0xdc, 0x3c, 0x32, 0xda, - 0x35, 0xf7, 0x9e, 0x06, 0xec, 0x9d, 0x16, 0xd3, 0x64, 0x59, 0x8f, 0x4f, 0xd0, 0xdd, 0x14, 0x46, - 0x8c, 0xb3, 0x38, 0xfa, 0x92, 0x71, 0x11, 0xa7, 0xe3, 0x13, 0x16, 0x32, 0x61, 0x6e, 0x29, 0x8e, - 0x39, 0x9d, 0x34, 0xef, 0x92, 0x92, 0x3c, 0x29, 0x75, 0xb5, 0xfe, 0xac, 0xa1, 0xbd, 0xa5, 0x09, - 0xc5, 0xdf, 0xa1, 0x03, 0x2f, 0x4b, 0x53, 0x88, 0x44, 0x37, 0x0b, 0xfb, 0x90, 0xf6, 0xbc, 0x0b, - 0xf0, 0xb3, 0x00, 0x7c, 0x75, 0xa3, 0x35, 0xd7, 0xd2, 0xb5, 0x1e, 0x1c, 0x97, 0xaa, 0xc8, 0x1a, - 0x37, 0xfe, 0x0a, 0xe1, 0x48, 0x85, 0x4e, 0x19, 0xe7, 0x33, 0xe6, 0x86, 0x62, 0x1e, 0x6a, 0x26, - 0xee, 0xae, 0x28, 0x48, 0x89, 0x4b, 0xd6, 0xe8, 0x03, 0x67, 0x29, 0xf8, 0xcb, 0x35, 0x56, 0x8b, - 0x35, 0x3e, 0x2e, 0x55, 0x91, 0x35, 0x6e, 0xfc, 0x31, 0xda, 0xc9, 0x77, 0x53, 0x3d, 0xd7, 0x97, - 0xf3, 0x86, 0x86, 0xed, 0x74, 0xe7, 0x29, 0xb2, 0xa8, 0x93, 0x47, 0x8b, 0xfb, 0x1c, 0xd2, 0x11, - 0xf8, 0x4f, 0xf2, 0x97, 0x8d, 0xc5, 0x91, 0x59, 0x3b, 0x32, 0xda, 0xd5, 0xf9, 0xd1, 0xce, 0x56, - 0x14, 0xa4, 0xc4, 0x25, 0x8f, 0x96, 0x4f, 0xcd, 0xca, 0xd1, 0xb6, 0x8a, 0x47, 0x3b, 0x2f, 0x55, - 0x91, 0x35, 0x6e, 0x39, 0x7b, 0x79, 0xc9, 0x8f, 0x46, 0x94, 0x05, 0xb4, 0x1f, 0x80, 0xf9, 0x5a, - 0x71, 0xf6, 0xba, 0xc5, 0x34, 0x59, 0xd6, 0xe3, 0x27, 0x68, 0x3f, 0x0f, 0x9d, 0x47, 0x74, 0x06, - 0xa9, 0x2b, 0xc8, 0x9b, 0x1a, 0xb2, 0xdf, 0x5d, 0x16, 0x90, 0x55, 0x0f, 0xfe, 0x0c, 0x35, 0xbc, - 0x38, 
0x08, 0xd4, 0x3c, 0x1e, 0xc7, 0x59, 0x24, 0xcc, 0x6d, 0x45, 0xc1, 0xf2, 0x3f, 0x74, 0x5c, - 0xc8, 0x90, 0x25, 0x65, 0xeb, 0xb9, 0x81, 0xee, 0xad, 0xf9, 0x1f, 0xe2, 0x2f, 0xd0, 0xa6, 0x18, - 0x27, 0xa0, 0x06, 0x75, 0xdb, 0xbd, 0x7f, 0xfd, 0x66, 0x3f, 0x1b, 0x27, 0xf0, 0xef, 0xa4, 0xf9, - 0xd6, 0x1a, 0x9b, 0x4c, 0x13, 0x65, 0xc4, 0x1e, 0xda, 0x4d, 0xe5, 0x76, 0xd1, 0x20, 0x97, 0xe8, - 0x57, 0xe6, 0xfd, 0xb2, 0xc7, 0x80, 0x2c, 0x0a, 0xe7, 0x4f, 0xe5, 0xfe, 0x74, 0xd2, 0xdc, 0x2d, - 0xe4, 0x48, 0x91, 0xd9, 0xfa, 0xdd, 0x40, 0x07, 0xe5, 0x66, 0x1c, 0xa0, 0x46, 0x48, 0x7f, 0x5d, - 0x6c, 0xef, 0xab, 0xbe, 0x7f, 0xf2, 0xab, 0x6a, 0xe7, 0x5f, 0x55, 0xfb, 0x69, 0x24, 0xce, 0xd2, - 0x9e, 0x48, 0x59, 0x34, 0xc8, 0x5b, 0x79, 0x5a, 0x60, 0x91, 0x25, 0xb6, 0xdb, 0xbe, 0xbc, 0xb2, - 0x2a, 0x2f, 0xae, 0xac, 0xca, 0xcb, 0x2b, 0xab, 0xf2, 0xdb, 0xd4, 0x32, 0x2e, 0xa7, 0x96, 0xf1, - 0x62, 0x6a, 0x19, 0x2f, 0xa7, 0x96, 0xf1, 0xf7, 0xd4, 0x32, 0xfe, 0xf8, 0xc7, 0xaa, 0x7c, 0xbf, - 0x31, 0xea, 0xfc, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x89, 0x78, 0x09, 0x33, 0x43, 0x09, 0x00, 0x00, + // 2051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0xcd, 0x66, 0x7b, 0xf0, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0x35, 0x33, 0xb5, 0xe3, 0x8e, 0xfb, 0x4b, 0xdd, 0xd5, + 0xc3, 0x8e, 0xb8, 0x20, 0x24, 0x38, 0x71, 0xe0, 0x3f, 0x41, 0x08, 0xc1, 0x0d, 0x45, 0x88, 0xcb, + 0x5e, 0x90, 0x22, 0x2e, 0xe4, 0x64, 0xb1, 0x93, 0x13, 0x42, 0x39, 0x72, 0xc9, 0x05, 0x54, 0xd5, + 0xd5, 0xdf, 0xd5, 0x9e, 0xb1, 0x37, 0xeb, 0xa0, 0x68, 0x6f, 0x9e, 0xaa, 0xf7, 0x7b, 0xfd, 0xab, + 0xaa, 0x5f, 0xd5, 0x7b, 0x5d, 0x6d, 0x70, 0xef, 0xf8, 0xdb, 0xae, 0xaa, 0x59, 0xcd, 0x63, 0xaf, + 0x4b, 0x1c, 0x93, 0x50, 0xe2, 0x36, 0x87, 0xc4, 0xec, 0x5b, 0x4e, 0x53, 0x74, 0x60, 0x5b, 0x6b, + 0x62, 0xdb, 0x76, 0x9b, 0xc3, 0xcd, 0xe6, 0x80, 0x98, 0xc4, 0xc1, 0x94, 0xf4, 0x55, 0xdb, 0xb1, + 0xa8, 0x05, 0xa1, 0x8f, 0x51, 0xb1, 0xad, 0xa9, 0x0c, 0xa3, 0x0e, 0x37, 0xaf, 0xde, 0x1e, 0x68, + 0xf4, 0xc8, 0xeb, 0xaa, 0x3d, 0xcb, 0x68, 0x0e, 0xac, 0x81, 0xd5, 0xe4, 0xd0, 0xae, 0xf7, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x3e, 0xc5, 0xd5, 0x46, 0xec, 0x31, 0x3d, 0xcb, 0x21, 0x92, 0xc7, + 0x5c, 0xbd, 0x19, 0xc3, 0xd8, 0x96, 0xae, 0xf5, 0x46, 0xcd, 0xe1, 0x66, 0x97, 0x50, 0x9c, 0x85, + 0xde, 0x8d, 0xa0, 0x06, 0xee, 0x1d, 0x69, 0x26, 0x71, 0x46, 0x4d, 0xfb, 0x78, 0xc0, 0x1a, 0xdc, + 0xa6, 0x41, 0x28, 0x96, 0x3d, 0xa0, 0x99, 0x17, 0xe5, 0x78, 0x26, 0xd5, 0x0c, 0x92, 0x09, 0x78, + 0x73, 0x52, 0x80, 0xdb, 0x3b, 0x22, 0x06, 0xce, 0xc4, 0xbd, 0x9e, 0x17, 0xe7, 0x51, 0x4d, 0x6f, + 0x6a, 0x26, 0x75, 0xa9, 0x93, 0x0e, 0x6a, 0xfc, 0x47, 0x01, 0xb0, 0x6d, 0x99, 0xd4, 0xb1, 0x74, + 0x9d, 0x38, 0x88, 0x0c, 0x35, 0x57, 0xb3, 0x4c, 0xf8, 0x53, 0x50, 0x61, 0xe3, 0xe9, 0x63, 0x8a, + 0xab, 0xca, 0x75, 0x65, 0x63, 0xe1, 0xce, 0xb7, 0xd4, 0x68, 0x3d, 0x42, 0x7a, 0xd5, 0x3e, 0x1e, + 0xb0, 0x06, 0x57, 0x65, 0x68, 0x75, 0xb8, 0xa9, 0xee, 0x75, 0x3f, 0x20, 0x3d, 0xda, 0x21, 0x14, + 0xb7, 0xe0, 0x93, 0x93, 0xfa, 0xcc, 0xf8, 0xa4, 0x0e, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x3d, 0x50, + 0xe2, 0xec, 0x05, 0xce, 0x7e, 0x3b, 0x97, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3f, 0xa6, + 0xc4, 0x64, 0xe9, 0xb5, 0x2e, 0x09, 0xea, 0xd2, 0x36, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x1a, 0xa8, + 0x38, 0x22, 0xfd, 0x6a, 0xf1, 0xba, 0xb2, 0x51, 0x6c, 0x5d, 0x16, 0xa8, 0x4a, 
0x30, 0x2c, 0x14, + 0x22, 0x1a, 0x7f, 0x55, 0xc0, 0x5a, 0x76, 0xdc, 0x3b, 0x9a, 0x4b, 0xe1, 0xfb, 0x99, 0xb1, 0xab, + 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x87, 0x0f, 0x0e, 0x5a, 0x62, 0xe3, 0x7e, 0x0f, 0x94, 0x35, + 0x4a, 0x0c, 0xb7, 0x5a, 0xb8, 0x5e, 0xdc, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0x96, 0xb9, 0x9a, 0x4d, + 0xac, 0xb5, 0x28, 0x28, 0xcb, 0xef, 0xb2, 0x60, 0xe4, 0x73, 0x34, 0xfe, 0xab, 0x80, 0xf9, 0x6d, + 0x4c, 0x0c, 0xcb, 0x3c, 0x20, 0xf4, 0x02, 0x16, 0xad, 0x0d, 0x4a, 0xae, 0x4d, 0x7a, 0x62, 0xd1, + 0xbe, 0x26, 0xcb, 0x3d, 0x4c, 0xe7, 0xc0, 0x26, 0xbd, 0x68, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, 0xe1, + 0x7b, 0x60, 0xd6, 0xa5, 0x98, 0x7a, 0x2e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, 0x43, + 0x5b, 0x4b, 0x82, 0x68, 0xd6, 0xff, 0x8d, 0x04, 0x45, 0xe3, 0x5f, 0x05, 0x00, 0x43, 0x6c, 0xdb, + 0x32, 0xfb, 0x1a, 0x65, 0xf5, 0xfb, 0x16, 0x28, 0xd1, 0x91, 0x4d, 0xf8, 0x34, 0xcc, 0xb7, 0x6e, + 0x04, 0x59, 0xdc, 0x1f, 0xd9, 0xe4, 0xb3, 0x93, 0xfa, 0x5a, 0x36, 0x82, 0xf5, 0x20, 0x1e, 0x03, + 0x77, 0xc2, 0xfc, 0x0a, 0x3c, 0xfa, 0x6e, 0xf2, 0xd1, 0x9f, 0x9d, 0xd4, 0x25, 0xe7, 0x8a, 0x1a, + 0x32, 0x25, 0x13, 0x84, 0x43, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, 0x36, 0x5d, 0xff, 0x49, 0x9a, + 0x41, 0xc4, 0xc8, 0x5f, 0x9d, 0x6e, 0x79, 0x58, 0x44, 0xeb, 0xaa, 0xc8, 0x02, 0xee, 0x64, 0xd8, + 0x90, 0xe4, 0x09, 0xf0, 0x06, 0x98, 0x75, 0x08, 0x76, 0x2d, 0xb3, 0x5a, 0xe2, 0xa3, 0x08, 0x27, + 0x10, 0xf1, 0x56, 0x24, 0x7a, 0xe1, 0x4d, 0x30, 0x67, 0x10, 0xd7, 0xc5, 0x03, 0x52, 0x2d, 0x73, + 0xe0, 0xb2, 0x00, 0xce, 0x75, 0xfc, 0x66, 0x14, 0xf4, 0x37, 0x7e, 0xaf, 0x80, 0xc5, 0x70, 0xe6, + 0x2e, 0x60, 0xab, 0xb4, 0x92, 0x5b, 0xe5, 0xe5, 0x53, 0xeb, 0x24, 0x67, 0x87, 0x7c, 0x58, 0x8c, + 0xe5, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0x54, 0x5c, 0xa2, 0x93, 0x1e, 0xb5, 0x1c, 0x91, 0xf3, 0xeb, + 0x53, 0xe6, 0x8c, 0xbb, 0x44, 0x3f, 0x10, 0xa1, 0xad, 0x4b, 0x2c, 0xe9, 0xe0, 0x17, 0x0a, 0x29, + 0xe1, 0x8f, 0x40, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x62, 0x9b, 0x24, 0xea, 0x9b, 0x95, 0x0b, + 0x23, 0xdb, 0xb7, 0xfa, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x70, 0x1e, 0x82, 0x56, 0x14, 0xd2, 0xc0, + 0x63, 0xb0, 0xe4, 0xd9, 0x7d, 0x86, 0xa4, 0xec, 0xe8, 0x1e, 0x8c, 0x44, 0xf9, 0xdc, 0x3a, 0x75, + 0x42, 0x0e, 0x13, 0x21, 0xad, 0x35, 0xf1, 0x80, 0xa5, 0x64, 0x3b, 0x4a, 0x51, 0xc3, 0x2d, 0xb0, + 0x6c, 0x68, 0x26, 0x22, 0xb8, 0x3f, 0x3a, 0x20, 0x3d, 0xcb, 0xec, 0xbb, 0xbc, 0x80, 0xca, 0xad, + 0x75, 0x41, 0xb0, 0xdc, 0x49, 0x76, 0xa3, 0x34, 0x1e, 0xee, 0x80, 0xd5, 0xe0, 0x9c, 0xfd, 0x81, + 0xe6, 0x52, 0xcb, 0x19, 0xed, 0x68, 0x86, 0x46, 0xab, 0xb3, 0x9c, 0xa7, 0x3a, 0x3e, 0xa9, 0xaf, + 0x22, 0x49, 0x3f, 0x92, 0x46, 0x35, 0x7e, 0x33, 0x0b, 0x96, 0x53, 0xa7, 0x01, 0x7c, 0x00, 0xd6, + 0x7a, 0x9e, 0xe3, 0x10, 0x93, 0xee, 0x7a, 0x46, 0x97, 0x38, 0x07, 0xbd, 0x23, 0xd2, 0xf7, 0x74, + 0xd2, 0xe7, 0x2b, 0x5a, 0x6e, 0xd5, 0x44, 0xae, 0x6b, 0x6d, 0x29, 0x0a, 0xe5, 0x44, 0xc3, 0x1f, + 0x02, 0x68, 0xf2, 0xa6, 0x8e, 0xe6, 0xba, 0x21, 0x67, 0x81, 0x73, 0x86, 0x1b, 0x70, 0x37, 0x83, + 0x40, 0x92, 0x28, 0x96, 0x63, 0x9f, 0xb8, 0x9a, 0x43, 0xfa, 0xe9, 0x1c, 0x8b, 0xc9, 0x1c, 0xb7, + 0xa5, 0x28, 0x94, 0x13, 0x0d, 0xdf, 0x00, 0x0b, 0xfe, 0xd3, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x11, + 0x64, 0x0b, 0xbb, 0x51, 0x17, 0x8a, 0xe3, 0xd8, 0xd0, 0xac, 0xae, 0x4b, 0x9c, 0x21, 0xe9, 0xbf, + 0xe3, 0x7b, 0x00, 0x26, 0x94, 0x65, 0x2e, 0x94, 0xe1, 0xd0, 0xf6, 0x32, 0x08, 0x24, 0x89, 0x62, + 0x43, 0xf3, 0xab, 0x26, 0x33, 0xb4, 0xd9, 0xe4, 0xd0, 0x0e, 0xa5, 0x28, 0x94, 0x13, 0xcd, 0x6a, + 0xcf, 0x4f, 0x79, 0x6b, 0x88, 0x35, 0x1d, 0x77, 0x75, 0x52, 0x9d, 0x4b, 0xd6, 0xde, 0x6e, 0xb2, + 0x1b, 
0xa5, 0xf1, 0xf0, 0x1d, 0x70, 0xc5, 0x6f, 0x3a, 0x34, 0x71, 0x48, 0x52, 0xe1, 0x24, 0x2f, + 0x09, 0x92, 0x2b, 0xbb, 0x69, 0x00, 0xca, 0xc6, 0xc0, 0xb7, 0xc0, 0x52, 0xcf, 0xd2, 0x75, 0x5e, + 0x8f, 0x6d, 0xcb, 0x33, 0x69, 0x75, 0x9e, 0xb3, 0x40, 0xb6, 0x87, 0xda, 0x89, 0x1e, 0x94, 0x42, + 0xc2, 0x87, 0x00, 0xf4, 0x02, 0x39, 0x70, 0xab, 0x20, 0x5f, 0xe8, 0xb3, 0x3a, 0x14, 0x09, 0x70, + 0xd8, 0xe4, 0xa2, 0x18, 0x5b, 0xe3, 0x43, 0x05, 0xac, 0xe7, 0xec, 0x71, 0xf8, 0xbd, 0x84, 0xea, + 0xdd, 0x4a, 0xa9, 0xde, 0xb5, 0x9c, 0xb0, 0x98, 0xf4, 0xf5, 0xc0, 0x22, 0xf3, 0x1d, 0x9a, 0x39, + 0xf0, 0x21, 0xe2, 0x04, 0x7b, 0x55, 0x96, 0x3b, 0x8a, 0x03, 0xa3, 0x63, 0xf8, 0xca, 0xf8, 0xa4, + 0xbe, 0x98, 0xe8, 0x43, 0x49, 0xce, 0xc6, 0x2f, 0x0b, 0x00, 0x6c, 0x13, 0x5b, 0xb7, 0x46, 0x06, + 0x31, 0x2f, 0xc2, 0xb5, 0x6c, 0x27, 0x5c, 0x4b, 0x43, 0xba, 0x10, 0x61, 0x3e, 0xb9, 0xb6, 0x65, + 0x27, 0x65, 0x5b, 0xbe, 0x31, 0x81, 0xe7, 0x74, 0xdf, 0xf2, 0x8f, 0x22, 0x58, 0x89, 0xc0, 0x91, + 0x71, 0xb9, 0x97, 0x58, 0xc2, 0x57, 0x52, 0x4b, 0xb8, 0x2e, 0x09, 0x79, 0x6e, 0xce, 0xe5, 0xf3, + 0x77, 0x10, 0xf0, 0x03, 0xb0, 0xc4, 0xac, 0x8a, 0x5f, 0x08, 0xdc, 0x08, 0xcd, 0x9e, 0xd9, 0x08, + 0x85, 0x42, 0xb6, 0x93, 0x60, 0x42, 0x29, 0xe6, 0x1c, 0xe3, 0x35, 0xf7, 0xbc, 0x8d, 0x57, 0xe3, + 0x0f, 0x0a, 0x58, 0x8a, 0x96, 0xe9, 0x02, 0x6c, 0x52, 0x3b, 0x69, 0x93, 0x6a, 0xa7, 0xd7, 0x65, + 0x8e, 0x4f, 0xfa, 0x7b, 0x29, 0x9e, 0x35, 0x37, 0x4a, 0x1b, 0xec, 0x85, 0xca, 0xd6, 0xb5, 0x1e, + 0x76, 0x85, 0xac, 0x5e, 0xf2, 0x5f, 0xa6, 0xfc, 0x36, 0x14, 0xf6, 0x26, 0x2c, 0x55, 0xe1, 0xf9, + 0x5a, 0xaa, 0xe2, 0xe7, 0x63, 0xa9, 0xee, 0x83, 0x8a, 0x1b, 0x98, 0xa9, 0x12, 0xa7, 0xbc, 0x31, + 0x69, 0x3b, 0x0b, 0x1f, 0x15, 0xb2, 0x86, 0x0e, 0x2a, 0x64, 0x92, 0x79, 0xa7, 0xf2, 0x17, 0xe9, + 0x9d, 0xd8, 0x16, 0xb6, 0xb1, 0xe7, 0x92, 0x3e, 0xaf, 0xfb, 0x4a, 0xb4, 0x85, 0xf7, 0x79, 0x2b, + 0x12, 0xbd, 0xf0, 0x10, 0xac, 0xdb, 0x8e, 0x35, 0x70, 0x88, 0xeb, 0x6e, 0x13, 0xdc, 0xd7, 0x35, + 0x93, 0x04, 0x03, 0xf0, 0x55, 0xef, 0xda, 0xf8, 0xa4, 0xbe, 0xbe, 0x2f, 0x87, 0xa0, 0xbc, 0xd8, + 0xc6, 0x9f, 0x4b, 0xe0, 0x72, 0xfa, 0x44, 0xcc, 0x31, 0x22, 0xca, 0xb9, 0x8c, 0xc8, 0x6b, 0xb1, + 0x12, 0xf5, 0x5d, 0x5a, 0xec, 0x9d, 0x3f, 0x53, 0xa6, 0x5b, 0x60, 0x59, 0x18, 0x8f, 0xa0, 0x53, + 0x58, 0xb1, 0x70, 0x79, 0x0e, 0x93, 0xdd, 0x28, 0x8d, 0x67, 0xf6, 0x22, 0x72, 0x0d, 0x01, 0x49, + 0x29, 0x69, 0x2f, 0xb6, 0xd2, 0x00, 0x94, 0x8d, 0x81, 0x1d, 0xb0, 0xe2, 0x99, 0x59, 0x2a, 0xbf, + 0x5c, 0xae, 0x09, 0xaa, 0x95, 0xc3, 0x2c, 0x04, 0xc9, 0xe2, 0xe0, 0x8f, 0x13, 0x8e, 0x63, 0x96, + 0x1f, 0x04, 0xaf, 0x9c, 0x5e, 0xd1, 0x53, 0x5b, 0x0e, 0x78, 0x0f, 0x2c, 0x3a, 0xdc, 0x50, 0x06, + 0x59, 0xfa, 0xa6, 0xec, 0x2b, 0x22, 0x6c, 0x11, 0xc5, 0x3b, 0x51, 0x12, 0x2b, 0xf1, 0x51, 0x95, + 0x69, 0x7d, 0x54, 0xe3, 0x4f, 0x0a, 0x80, 0xd9, 0x2d, 0x38, 0xf1, 0xe5, 0x3e, 0x13, 0x11, 0x93, + 0xc8, 0xbe, 0xdc, 0xe1, 0xdc, 0x9a, 0xec, 0x70, 0xa2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, + 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x51, 0x3e, 0xcf, 0x66, 0x71, 0x62, 0x3c, 0xa7, 0x5b, 0x9c, + 0x7f, 0x17, 0xc0, 0x4a, 0x04, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0x5e, 0x5c, 0xce, 0x4c, 0xbe, 0x9c, + 0x61, 0xb6, 0x23, 0x9a, 0xba, 0xff, 0x13, 0xdb, 0x11, 0x25, 0x94, 0x63, 0x3b, 0x7e, 0x57, 0x88, + 0x67, 0xfd, 0xa5, 0xb7, 0x1d, 0xcf, 0x7e, 0xb9, 0xd2, 0xf8, 0x4b, 0x11, 0x5c, 0x4e, 0x6f, 0xc1, + 0x84, 0x0e, 0x2a, 0x13, 0x75, 0x70, 0x1f, 0xac, 0x3e, 0xf2, 0x74, 0x7d, 0xc4, 0xa7, 0x21, 0x26, + 0x86, 0xbe, 0x82, 0x7e, 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0x1c, 0x4d, 0x2f, + 0x9e, 0x4b, 0xd3, 0x33, 0x6a, 
0x53, 0x3a, 0x83, 0xda, 0x48, 0xf5, 0xb9, 0x7c, 0x0e, 0x7d, 0x9e, + 0x5a, 0x50, 0x25, 0xc7, 0xd5, 0xc4, 0x77, 0xf8, 0x5f, 0x2b, 0x60, 0x4d, 0xfe, 0xfa, 0x0c, 0x75, + 0xb0, 0x64, 0xe0, 0xc7, 0xf1, 0xcb, 0x8b, 0x49, 0x82, 0xe1, 0x51, 0x4d, 0x57, 0xfd, 0xaf, 0x3b, + 0xea, 0xbb, 0x26, 0xdd, 0x73, 0x0e, 0xa8, 0xa3, 0x99, 0x03, 0x5f, 0x60, 0x3b, 0x09, 0x2e, 0x94, + 0xe2, 0x6e, 0x7c, 0xa2, 0x80, 0xf5, 0x1c, 0x95, 0xbb, 0xd8, 0x4c, 0xe0, 0x43, 0x50, 0x31, 0xf0, + 0xe3, 0x03, 0xcf, 0x19, 0x04, 0x92, 0x7c, 0xf6, 0xe7, 0xf0, 0x8d, 0xdc, 0x11, 0x2c, 0x28, 0xe4, + 0x6b, 0xec, 0x81, 0xeb, 0x89, 0x41, 0xb2, 0x4d, 0x43, 0x1e, 0x79, 0x3a, 0xdf, 0x3f, 0xc2, 0x53, + 0xdc, 0x02, 0xf3, 0x36, 0x76, 0xa8, 0x16, 0x9a, 0xd1, 0x72, 0x6b, 0x71, 0x7c, 0x52, 0x9f, 0xdf, + 0x0f, 0x1a, 0x51, 0xd4, 0xdf, 0xf8, 0x55, 0x01, 0x2c, 0xc4, 0x48, 0x2e, 0x40, 0xdf, 0xdf, 0x4e, + 0xe8, 0xbb, 0xf4, 0x8b, 0x49, 0x7c, 0x54, 0x79, 0x02, 0xdf, 0x49, 0x09, 0xfc, 0x37, 0x27, 0x11, + 0x9d, 0xae, 0xf0, 0x9f, 0x16, 0xc0, 0x6a, 0x0c, 0x1d, 0x49, 0xfc, 0x77, 0x12, 0x12, 0xbf, 0x91, + 0x92, 0xf8, 0xaa, 0x2c, 0xe6, 0x85, 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x36, 0x77, + 0x17, 0x20, 0xf2, 0xdb, 0x49, 0x91, 0xaf, 0x4f, 0xa8, 0x97, 0x1c, 0x95, 0x7f, 0x52, 0x4e, 0xe4, + 0xfd, 0xa5, 0x97, 0xf9, 0x9f, 0x83, 0xd5, 0xa1, 0xa5, 0x7b, 0x06, 0x69, 0xeb, 0x58, 0x33, 0x02, + 0x00, 0x53, 0x32, 0x36, 0x89, 0x37, 0xa5, 0xf4, 0xc4, 0x71, 0x35, 0x97, 0x12, 0x93, 0x3e, 0x88, + 0x22, 0x23, 0x2d, 0x7e, 0x20, 0xa1, 0x43, 0xd2, 0x87, 0xc0, 0x37, 0xc0, 0x02, 0xd3, 0x54, 0xad, + 0x47, 0x76, 0xb1, 0x11, 0xd4, 0x54, 0xf8, 0x7d, 0xe0, 0x20, 0xea, 0x42, 0x71, 0x1c, 0x3c, 0x02, + 0x2b, 0xb6, 0xd5, 0xef, 0x60, 0x13, 0x0f, 0x08, 0x3b, 0xff, 0xf7, 0xf9, 0xff, 0x42, 0xf0, 0x7b, + 0x87, 0xf9, 0xd6, 0x9b, 0xc1, 0x0b, 0xe9, 0x7e, 0x16, 0xc2, 0x3c, 0xbb, 0xa4, 0x99, 0xef, 0x67, + 0x19, 0x25, 0x34, 0x32, 0x9f, 0xb3, 0xe6, 0x32, 0xff, 0x03, 0x20, 0x2b, 0xae, 0x73, 0x7e, 0xd0, + 0xca, 0xbb, 0x51, 0xa9, 0x9c, 0xeb, 0x6b, 0xd4, 0xa7, 0x25, 0x70, 0x25, 0x73, 0x40, 0x7e, 0x81, + 0x77, 0x1a, 0x19, 0xb7, 0x54, 0x3c, 0x83, 0x5b, 0xda, 0x02, 0xcb, 0xe2, 0x43, 0x58, 0xca, 0x6c, + 0x85, 0x76, 0xb4, 0x9d, 0xec, 0x46, 0x69, 0xbc, 0xec, 0x4e, 0xa5, 0x7c, 0xc6, 0x3b, 0x95, 0x78, + 0x16, 0xe2, 0xff, 0x37, 0xfc, 0xaa, 0xcb, 0x66, 0x21, 0xfe, 0x8d, 0x23, 0x8d, 0x87, 0xdf, 0x0d, + 0x4a, 0x2a, 0x64, 0x98, 0xe3, 0x0c, 0xa9, 0x1a, 0x09, 0x09, 0x52, 0xe8, 0x67, 0xfa, 0xd8, 0xf3, + 0xbe, 0xe4, 0x63, 0xcf, 0xc6, 0x84, 0x52, 0x9e, 0xde, 0x2a, 0xfe, 0x4d, 0x01, 0x2f, 0xe5, 0xee, + 0x01, 0xb8, 0x95, 0xd0, 0xd9, 0xdb, 0x29, 0x9d, 0x7d, 0x39, 0x37, 0x30, 0x26, 0xb6, 0x86, 0xfc, + 0x42, 0xe4, 0xee, 0xc4, 0x0b, 0x11, 0x89, 0x8b, 0x9a, 0x7c, 0x33, 0xd2, 0xda, 0x78, 0xf2, 0xb4, + 0x36, 0xf3, 0xd1, 0xd3, 0xda, 0xcc, 0xc7, 0x4f, 0x6b, 0x33, 0xbf, 0x18, 0xd7, 0x94, 0x27, 0xe3, + 0x9a, 0xf2, 0xd1, 0xb8, 0xa6, 0x7c, 0x3c, 0xae, 0x29, 0xff, 0x1c, 0xd7, 0x94, 0xdf, 0x7e, 0x52, + 0x9b, 0x79, 0x58, 0x18, 0x6e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x70, 0x5f, 0xbf, 0x58, 0x3d, + 0x26, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 8fa54afd..c0499d32 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -31,6 +31,38 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// ControllerRevision implements an immutable snapshot of state data. 
Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. @@ -52,6 +84,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -67,9 +120,8 @@ message DaemonSetList { message DaemonSetSpec { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. @@ -143,6 +195,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. 
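// Illustrative sketch, not part of the vendored files: the apps/v1 DaemonSetSpec above makes
// the selector required, and it must match the pod template's labels. Assuming the vendored
// k8s.io/api and k8s.io/apimachinery packages are on the import path, a minimal conforming
// object could be built like this (names and images are hypothetical):

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleDaemonSet() *appsv1.DaemonSet {
	labels := map[string]string{"app": "node-exporter"} // hypothetical label set
	return &appsv1.DaemonSet{
		ObjectMeta: metav1.ObjectMeta{Name: "node-exporter"},
		Spec: appsv1.DaemonSetSpec{
			// Required in apps/v1; an empty selector is no longer defaulted from the template.
			Selector: &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels}, // must match the selector above
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{Name: "exporter", Image: "example/exporter:latest"}},
				},
			},
		},
	}
}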
@@ -160,6 +218,262 @@ message DaemonSetUpdateStrategy { optional RollingUpdateDaemonSet rollingUpdate = 2; } +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. 
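// Illustrative sketch, not part of the vendored files: a minimal DeploymentSpec using the
// fields documented above. Replicas, RevisionHistoryLimit and ProgressDeadlineSeconds are
// pointers so an explicit zero can be distinguished from "unset"; all concrete names and
// values here are hypothetical.

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func int32Ptr(i int32) *int32 { return &i }

func exampleDeploymentSpec() appsv1.DeploymentSpec {
	labels := map[string]string{"app": "web"} // hypothetical
	return appsv1.DeploymentSpec{
		Replicas: int32Ptr(3), // nil would mean "default to 1"
		Selector: &metav1.LabelSelector{MatchLabels: labels},
		Template: corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels}, // must match the selector
			Spec:       corev1.PodSpec{Containers: []corev1.Container{{Name: "web", Image: "example/web:1.0"}}},
		},
		RevisionHistoryLimit:    int32Ptr(10),  // documented default
		ProgressDeadlineSeconds: int32Ptr(600), // documented 600s default
	}
}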
+ // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. 
+ // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + // Spec to control the desired behavior of daemon set rolling update. message RollingUpdateDaemonSet { // The maximum number of DaemonSet pods that can be unavailable during the @@ -179,3 +493,209 @@ message RollingUpdateDaemonSet { // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; } + +// Spec to control the desired behavior of rolling update. 
+message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + optional int32 partition = 1; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. 
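// Illustrative sketch, not part of the vendored files: the rounding rules documented above
// for RollingUpdateDeployment, applied by hand. With 10 desired pods and the 25% defaults,
// maxUnavailable rounds down to 2 and maxSurge rounds up to 3, so the rollout keeps between
// 8 and 13 pods at any moment. scaledValue is a hypothetical helper, not a Kubernetes API.

func scaledValue(percent, desired int, roundUp bool) int {
	v := percent * desired
	if roundUp && v%100 != 0 {
		return v/100 + 1 // maxSurge: round the percentage up
	}
	return v / 100 // maxUnavailable: round the percentage down
}

// scaledValue(25, 10, false) == 2  -> at most 2 pods unavailable during the update
// scaledValue(25, 10, true)  == 3  -> at most 3 pods above the desired count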
+message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. 
+ // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go index 586b53ef..02710104 100644 --- a/vendor/k8s.io/api/apps/v1/register.go +++ b/vendor/k8s.io/api/apps/v1/register.go @@ -41,11 +41,19 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
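// Illustrative sketch, not part of the vendored files: how a consumer of this package would
// use the exported AddToScheme helper (built from localSchemeBuilder above) to register the
// apps/v1 kinds that addKnownTypes wires up, so they can be encoded and decoded by
// group-version-kind.

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func newAppsScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := appsv1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}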
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &StatefulSet{}, + &StatefulSetList{}, &DaemonSet{}, &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index dc589e65..f32300fe 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -19,14 +19,440 @@ package v1 import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) const ( ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. 
+ // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy, new Pods will be created from the + // specification version indicated by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameters for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set.
Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). 
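// Illustrative sketch, not part of the vendored files: the updateStrategy and Partition
// fields described above allow a canary-style rollout. With three replicas and Partition=2,
// only the pod with ordinal 2 is recreated from the updated revision; ordinals 0 and 1 stay
// on the current revision until the partition is lowered. Values here are hypothetical.

import appsv1 "k8s.io/api/apps/v1"

func canaryUpdateStrategy() appsv1.StatefulSetUpdateStrategy {
	partition := int32(2) // ordinals >= 2 receive the new revision
	return appsv1.StatefulSetUpdateStrategy{
		Type:          appsv1.RollingUpdateStatefulSetStrategyType,
		RollingUpdate: &appsv1.RollingUpdateStatefulSetStrategy{Partition: &partition},
	}
}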
+ CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. 
+ // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. 
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. 
+ // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. type DaemonSetUpdateStrategy struct { // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. @@ -76,10 +502,9 @@ type RollingUpdateDaemonSet struct { type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -152,6 +577,33 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. 
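// Illustrative sketch, not part of the vendored files: client code typically inspects the
// Deployment condition types defined above rather than raw replica counts. A minimal check
// for the Available condition (at least the minimum required replicas up for minReadySeconds):

import (
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func deploymentIsAvailable(d *appsv1.Deployment) bool {
	for _, cond := range d.Status.Conditions {
		if cond.Type == appsv1.DeploymentAvailable {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false // the controller has not reported an Available condition yet
}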
// +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient @@ -199,3 +651,169 @@ type DaemonSetList struct { // A list of daemon sets. Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. 
+type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. 
+type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 4cc96dc4..76305393 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -27,6 +27,27 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_ControllerRevision = map[string]string{ + "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. 
Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -38,6 +59,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -50,7 +84,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", @@ -72,6 +106,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -88,6 +123,143 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { return map_DaemonSetUpdateStrategy } +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", @@ -97,4 +269,97 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { return map_RollingUpdateDaemonSet } +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. 
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 72d654a5..c41db298 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -21,48 +21,72 @@ limitations under the License. package v1 import ( + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -94,6 +118,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -176,6 +217,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -214,6 +262,336 @@ func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. 
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { *out = *in @@ -238,3 +616,251 @@ func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. 
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 158f0000..6047ed50 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 6d5e1f7a..baee7a97 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -41,6 +41,7 @@ limitations under the License. 
ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus @@ -144,22 +145,26 @@ func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func init() { @@ -179,6 +184,7 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") @@ -840,6 +846,48 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *StatefulSetList) Marshal() (dAtA []byte, err error) 
{ size := m.Size() dAtA = make([]byte, size) @@ -858,11 +906,11 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n24, err := m.ListMeta.MarshalTo(dAtA[i:]) + n25, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -902,20 +950,20 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n25, err := m.Selector.MarshalTo(dAtA[i:]) + n26, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n26, err := m.Template.MarshalTo(dAtA[i:]) + n27, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 @@ -939,11 +987,11 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n27, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 if m.RevisionHistoryLimit != nil { dAtA[i] = 0x40 i++ @@ -997,6 +1045,18 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1023,11 +1083,11 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n28, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 } return i, nil } @@ -1286,6 +1346,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1347,6 +1423,12 @@ func (m *StatefulSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1591,6 +1673,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", 
this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetList) String() string { if this == nil { return "nil" @@ -1632,6 +1728,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4017,6 +4114,202 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4603,6 +4896,37 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4846,119 +5170,122 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1820 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, - 0x11, 0xd6, 0x50, 0xa4, 0x44, 0xb6, 0x56, 0x94, 0xd5, 0x52, 0x24, 0xae, 0x36, 0xa1, 0x04, 0x66, - 0xb1, 0x2b, 0xef, 0xae, 0x86, 0x6b, 0xd9, 0x31, 0xfc, 0x00, 0x8c, 0x88, 0x94, 0x63, 0xcb, 0x90, - 0x22, 0xb9, 0x29, 0x39, 0x88, 0x93, 0x00, 0x6e, 0x0e, 0xdb, 0xd4, 0x58, 0xf3, 0xc2, 0x4c, 0x0f, - 0x63, 0x22, 0x97, 0xdc, 0x13, 0xc0, 0x39, 0xe7, 0x57, 0xe4, 0x18, 0x24, 0xb7, 0x9c, 0x74, 0x09, - 0x60, 0xe4, 0x12, 0x9f, 0x84, 0x98, 0xbe, 0xe6, 0x9a, 0x8b, 0x81, 0x00, 0x41, 0xf7, 0xf4, 0xbc, - 0x67, 0x24, 0x2a, 0x40, 0x74, 0xc8, 0x8d, 0xd3, 0x55, 0xf5, 0x55, 0x75, 0x77, 0x75, 0xd5, 0x57, - 0x04, 0x3f, 0x3c, 0xb9, 0xe3, 0xc8, 0xaa, 0xd9, 0x3c, 0x71, 0xbb, 0xc4, 0x36, 0x08, 0x25, 0x4e, - 0x73, 0x40, 0x8c, 0x9e, 0x69, 0x37, 0x85, 0x00, 0x5b, 0x6a, 0x13, 0x5b, 0x96, 0xd3, 0x1c, 0xdc, - 0xe8, 0x12, 0x8a, 0x6f, 0x34, 0xfb, 0xc4, 0x20, 0x36, 0xa6, 0xa4, 0x27, 0x5b, 0xb6, 0x49, 0x4d, - 0xb8, 0xec, 0x29, 0xca, 0xd8, 0x52, 0x65, 0xa6, 
0x28, 0x0b, 0xc5, 0x95, 0x8d, 0xbe, 0x4a, 0x8f, - 0xdd, 0xae, 0xac, 0x98, 0x7a, 0xb3, 0x6f, 0xf6, 0xcd, 0x26, 0xd7, 0xef, 0xba, 0x2f, 0xf9, 0x17, - 0xff, 0xe0, 0xbf, 0x3c, 0x9c, 0x95, 0x46, 0xc4, 0xa1, 0x62, 0xda, 0xa4, 0x39, 0x48, 0xf9, 0x5a, - 0xb9, 0x1e, 0xd1, 0xb1, 0x4c, 0x4d, 0x55, 0x86, 0x79, 0x61, 0xad, 0xdc, 0x0a, 0x55, 0x75, 0xac, - 0x1c, 0xab, 0x06, 0xb1, 0x87, 0x4d, 0xeb, 0xa4, 0xcf, 0x16, 0x9c, 0xa6, 0x4e, 0x28, 0xce, 0x72, - 0xd0, 0xcc, 0xb3, 0xb2, 0x5d, 0x83, 0xaa, 0x3a, 0x49, 0x19, 0xdc, 0xbe, 0xc8, 0xc0, 0x51, 0x8e, - 0x89, 0x8e, 0x53, 0x76, 0x37, 0xf3, 0xec, 0x5c, 0xaa, 0x6a, 0x4d, 0xd5, 0xa0, 0x0e, 0xb5, 0x93, - 0x46, 0x8d, 0x7f, 0x49, 0x00, 0xb6, 0x4d, 0x83, 0xda, 0xa6, 0xa6, 0x11, 0x1b, 0x91, 0x81, 0xea, - 0xa8, 0xa6, 0x01, 0x5f, 0x80, 0x32, 0xdb, 0x4f, 0x0f, 0x53, 0x5c, 0x93, 0xd6, 0xa4, 0xf5, 0x99, - 0xcd, 0x6f, 0xe5, 0xf0, 0x52, 0x02, 0x78, 0xd9, 0x3a, 0xe9, 0xb3, 0x05, 0x47, 0x66, 0xda, 0xf2, - 0xe0, 0x86, 0xbc, 0xdf, 0x7d, 0x45, 0x14, 0xba, 0x47, 0x28, 0x6e, 0xc1, 0xd3, 0xb3, 0xd5, 0x89, - 0xd1, 0xd9, 0x2a, 0x08, 0xd7, 0x50, 0x80, 0x0a, 0xf7, 0x41, 0x91, 0xa3, 0x17, 0x38, 0xfa, 0x46, - 0x2e, 0xba, 0xd8, 0xb4, 0x8c, 0xf0, 0x2f, 0x1f, 0xbe, 0xa6, 0xc4, 0x60, 0xe1, 0xb5, 0x3e, 0x11, - 0xd0, 0xc5, 0x6d, 0x4c, 0x31, 0xe2, 0x40, 0xf0, 0x1b, 0x50, 0xb6, 0x45, 0xf8, 0xb5, 0xc9, 0x35, - 0x69, 0x7d, 0xb2, 0x75, 0x4d, 0x68, 0x95, 0xfd, 0x6d, 0xa1, 0x40, 0xa3, 0x71, 0x2a, 0x81, 0xa5, - 0xf4, 0xbe, 0x77, 0x55, 0x87, 0xc2, 0x9f, 0xa7, 0xf6, 0x2e, 0x8f, 0xb7, 0x77, 0x66, 0xcd, 0x77, - 0x1e, 0x38, 0xf6, 0x57, 0x22, 0xfb, 0x3e, 0x00, 0x25, 0x95, 0x12, 0xdd, 0xa9, 0x15, 0xd6, 0x26, - 0xd7, 0x67, 0x36, 0xbf, 0x96, 0x73, 0x72, 0x5d, 0x4e, 0x47, 0xd7, 0x9a, 0x15, 0xb8, 0xa5, 0x1d, - 0x86, 0x80, 0x3c, 0xa0, 0xc6, 0x6f, 0x0b, 0x00, 0x6c, 0x13, 0x4b, 0x33, 0x87, 0x3a, 0x31, 0xe8, - 0x15, 0x5c, 0xdd, 0x0e, 0x28, 0x3a, 0x16, 0x51, 0xc4, 0xd5, 0x7d, 0x99, 0xbb, 0x83, 0x30, 0xa8, - 0x8e, 0x45, 0x94, 0xf0, 0xd2, 0xd8, 0x17, 0xe2, 0x10, 0xf0, 0x29, 0x98, 0x72, 0x28, 0xa6, 0xae, - 0xc3, 0xaf, 0x6c, 0x66, 0xf3, 0xfa, 0x38, 0x60, 0xdc, 0xa0, 0x55, 0x15, 0x70, 0x53, 0xde, 0x37, - 0x12, 0x40, 0x8d, 0xbf, 0x4f, 0x82, 0x85, 0x50, 0xb9, 0x6d, 0x1a, 0x3d, 0x95, 0xb2, 0x94, 0xbe, - 0x0f, 0x8a, 0x74, 0x68, 0x11, 0x7e, 0x26, 0x95, 0xd6, 0x97, 0x7e, 0x30, 0x87, 0x43, 0x8b, 0x7c, - 0x3c, 0x5b, 0x5d, 0xce, 0x30, 0x61, 0x22, 0xc4, 0x8d, 0xe0, 0x6e, 0x10, 0x67, 0x81, 0x9b, 0xdf, - 0x8a, 0x3b, 0xff, 0x78, 0xb6, 0x9a, 0x51, 0x6b, 0xe4, 0x00, 0x29, 0x1e, 0x22, 0xfc, 0x02, 0x4c, - 0xd9, 0x04, 0x3b, 0xa6, 0x51, 0x2b, 0x72, 0xb4, 0x60, 0x2b, 0x88, 0xaf, 0x22, 0x21, 0x85, 0xd7, - 0xc1, 0xb4, 0x4e, 0x1c, 0x07, 0xf7, 0x49, 0xad, 0xc4, 0x15, 0xe7, 0x84, 0xe2, 0xf4, 0x9e, 0xb7, - 0x8c, 0x7c, 0x39, 0x7c, 0x05, 0xaa, 0x1a, 0x76, 0xe8, 0x91, 0xd5, 0xc3, 0x94, 0x1c, 0xaa, 0x3a, - 0xa9, 0x4d, 0xf1, 0x03, 0xfd, 0x6a, 0xbc, 0xbb, 0x67, 0x16, 0xad, 0x25, 0x81, 0x5e, 0xdd, 0x8d, - 0x21, 0xa1, 0x04, 0x32, 0x1c, 0x00, 0xc8, 0x56, 0x0e, 0x6d, 0x6c, 0x38, 0xde, 0x41, 0x31, 0x7f, - 0xd3, 0x97, 0xf6, 0xb7, 0x22, 0xfc, 0xc1, 0xdd, 0x14, 0x1a, 0xca, 0xf0, 0xd0, 0xf8, 0xa3, 0x04, - 0xaa, 0xe1, 0x35, 0x5d, 0xc1, 0x5b, 0x7d, 0x1c, 0x7f, 0xab, 0xdf, 0x1f, 0x23, 0x39, 0x73, 0xde, - 0xe8, 0x3f, 0x0b, 0x00, 0x86, 0x4a, 0xc8, 0xd4, 0xb4, 0x2e, 0x56, 0x4e, 0xe0, 0x1a, 0x28, 0x1a, - 0x58, 0xf7, 0x73, 0x32, 0x78, 0x20, 0x3f, 0xc6, 0x3a, 0x41, 0x5c, 0x02, 0xdf, 0x48, 0x00, 0xba, - 0xfc, 0xe8, 0x7b, 0x5b, 0x86, 0x61, 0x52, 0xcc, 0x4e, 0xc3, 0x0f, 0xa8, 0x3d, 0x46, 0x40, 0xbe, - 0x2f, 0xf9, 0x28, 0x85, 0xf2, 0xd0, 0xa0, 0xf6, 0x30, 0xbc, 0x85, 0xb4, 
0x02, 0xca, 0x70, 0x0d, - 0x7f, 0x06, 0x80, 0x2d, 0x30, 0x0f, 0x4d, 0xf1, 0x6c, 0xf3, 0x6b, 0x80, 0xef, 0xbe, 0x6d, 0x1a, - 0x2f, 0xd5, 0x7e, 0x58, 0x58, 0x50, 0x00, 0x81, 0x22, 0x70, 0x2b, 0x0f, 0xc1, 0x72, 0x4e, 0x9c, - 0xf0, 0x1a, 0x98, 0x3c, 0x21, 0x43, 0xef, 0xa8, 0x10, 0xfb, 0x09, 0x17, 0x41, 0x69, 0x80, 0x35, - 0x97, 0x78, 0x6f, 0x12, 0x79, 0x1f, 0xf7, 0x0a, 0x77, 0xa4, 0xc6, 0x1f, 0x4a, 0xd1, 0x4c, 0x61, - 0xf5, 0x06, 0xae, 0xb3, 0xf6, 0x60, 0x69, 0xaa, 0x82, 0x1d, 0x8e, 0x51, 0x6a, 0x7d, 0xe2, 0xb5, - 0x06, 0x6f, 0x0d, 0x05, 0x52, 0xf8, 0x0b, 0x50, 0x76, 0x88, 0x46, 0x14, 0x6a, 0xda, 0xa2, 0xc4, - 0xdd, 0x1c, 0x33, 0xa7, 0x70, 0x97, 0x68, 0x1d, 0x61, 0xea, 0xc1, 0xfb, 0x5f, 0x28, 0x80, 0x84, - 0x4f, 0x41, 0x99, 0x12, 0xdd, 0xd2, 0x30, 0x25, 0xe2, 0xf4, 0x62, 0x79, 0xc5, 0x6a, 0x07, 0x03, - 0x3b, 0x30, 0x7b, 0x87, 0x42, 0x8d, 0x57, 0xcf, 0x20, 0x4f, 0xfd, 0x55, 0x14, 0xc0, 0xc0, 0x9f, - 0x82, 0xb2, 0x43, 0x59, 0x57, 0xef, 0x0f, 0x79, 0x45, 0x39, 0xaf, 0xad, 0x44, 0xeb, 0xa8, 0x67, - 0x12, 0x42, 0xfb, 0x2b, 0x28, 0x80, 0x83, 0x5b, 0x60, 0x4e, 0x57, 0x0d, 0x44, 0x70, 0x6f, 0xd8, - 0x21, 0x8a, 0x69, 0xf4, 0x1c, 0x5e, 0x8a, 0x4a, 0xad, 0x65, 0x61, 0x34, 0xb7, 0x17, 0x17, 0xa3, - 0xa4, 0x3e, 0xdc, 0x05, 0x8b, 0x7e, 0xdb, 0x7d, 0xac, 0x3a, 0xd4, 0xb4, 0x87, 0xbb, 0xaa, 0xae, - 0x52, 0x5e, 0xa0, 0x4a, 0xad, 0xda, 0xe8, 0x6c, 0x75, 0x11, 0x65, 0xc8, 0x51, 0xa6, 0x15, 0xab, - 0x9d, 0x16, 0x76, 0x1d, 0xd2, 0xe3, 0x05, 0xa7, 0x1c, 0xd6, 0xce, 0x03, 0xbe, 0x8a, 0x84, 0x14, - 0xfe, 0x24, 0x96, 0xa6, 0xe5, 0xcb, 0xa5, 0x69, 0x35, 0x3f, 0x45, 0xe1, 0x11, 0x58, 0xb6, 0x6c, - 0xb3, 0x6f, 0x13, 0xc7, 0xd9, 0x26, 0xb8, 0xa7, 0xa9, 0x06, 0xf1, 0x4f, 0xa6, 0xc2, 0x77, 0xf4, - 0xd9, 0xe8, 0x6c, 0x75, 0xf9, 0x20, 0x5b, 0x05, 0xe5, 0xd9, 0x36, 0xfe, 0x52, 0x04, 0xd7, 0x92, - 0x3d, 0x0e, 0x3e, 0x01, 0xd0, 0xec, 0x3a, 0xc4, 0x1e, 0x90, 0xde, 0x23, 0x8f, 0xb8, 0x31, 0x76, - 0x23, 0x71, 0x76, 0x13, 0xbc, 0xdb, 0xfd, 0x94, 0x06, 0xca, 0xb0, 0xf2, 0xf8, 0x91, 0x78, 0x00, - 0x05, 0x1e, 0x68, 0x84, 0x1f, 0xa5, 0x1e, 0xc1, 0x16, 0x98, 0x13, 0x6f, 0xdf, 0x17, 0xf2, 0x64, - 0x8d, 0xdc, 0xfb, 0x51, 0x5c, 0x8c, 0x92, 0xfa, 0xf0, 0x11, 0x98, 0xc7, 0x03, 0xac, 0x6a, 0xb8, - 0xab, 0x91, 0x00, 0xa4, 0xc8, 0x41, 0x3e, 0x15, 0x20, 0xf3, 0x5b, 0x49, 0x05, 0x94, 0xb6, 0x81, - 0x7b, 0x60, 0xc1, 0x35, 0xd2, 0x50, 0x5e, 0x1e, 0x7e, 0x26, 0xa0, 0x16, 0x8e, 0xd2, 0x2a, 0x28, - 0xcb, 0x0e, 0xbe, 0x00, 0x40, 0xf1, 0x1b, 0xb3, 0x53, 0x9b, 0xe2, 0x95, 0xf4, 0x9b, 0x31, 0xde, - 0x4b, 0xd0, 0xcd, 0xc3, 0x2a, 0x16, 0x2c, 0x39, 0x28, 0x82, 0x09, 0xef, 0x83, 0x59, 0x9b, 0xbd, - 0x80, 0x20, 0xd4, 0x69, 0x1e, 0xea, 0x77, 0x84, 0xd9, 0x2c, 0x8a, 0x0a, 0x51, 0x5c, 0x17, 0xde, - 0x03, 0x55, 0xc5, 0xd4, 0x34, 0x9e, 0xf9, 0x6d, 0xd3, 0x35, 0x28, 0x4f, 0xde, 0x52, 0x0b, 0xb2, - 0xce, 0xdc, 0x8e, 0x49, 0x50, 0x42, 0xb3, 0xf1, 0x67, 0x29, 0xda, 0x66, 0xfc, 0xe7, 0x0c, 0xef, - 0xc5, 0xa8, 0xcf, 0x17, 0x09, 0xea, 0xb3, 0x94, 0xb6, 0x88, 0x30, 0x1f, 0x15, 0xcc, 0xb2, 0xe4, - 0x57, 0x8d, 0xbe, 0x77, 0xe1, 0xa2, 0x24, 0x7e, 0x7b, 0xee, 0x53, 0x0a, 0xb4, 0x23, 0x8d, 0x71, - 0x9e, 0xef, 0x3c, 0x2a, 0x44, 0x71, 0xe4, 0xc6, 0x03, 0x50, 0x8d, 0xbf, 0xc3, 0x18, 0xa7, 0x97, - 0x2e, 0xe4, 0xf4, 0x1f, 0x24, 0xb0, 0x9c, 0xe3, 0x1d, 0x6a, 0xa0, 0xaa, 0xe3, 0xd7, 0x91, 0x1c, - 0xb9, 0x90, 0x1b, 0xb3, 0xa9, 0x49, 0xf6, 0xa6, 0x26, 0x79, 0xc7, 0xa0, 0xfb, 0x76, 0x87, 0xda, - 0xaa, 0xd1, 0xf7, 0xee, 0x61, 0x2f, 0x86, 0x85, 0x12, 0xd8, 0xf0, 0x39, 0x28, 0xeb, 0xf8, 0x75, - 0xc7, 0xb5, 0xfb, 0x59, 0xe7, 0x35, 0x9e, 0x1f, 0xde, 0x3f, 0xf6, 0x04, 0x0a, 0x0a, 0xf0, 0x1a, - 
0xfb, 0x60, 0x2d, 0xb6, 0x49, 0x56, 0x2a, 0xc8, 0x4b, 0x57, 0xeb, 0x90, 0xf0, 0xc2, 0xbf, 0x06, - 0x15, 0x0b, 0xdb, 0x54, 0x0d, 0xca, 0x45, 0xa9, 0x35, 0x3b, 0x3a, 0x5b, 0xad, 0x1c, 0xf8, 0x8b, - 0x28, 0x94, 0x37, 0xfe, 0x2d, 0x81, 0x52, 0x47, 0xc1, 0x1a, 0xb9, 0x82, 0xd1, 0x61, 0x3b, 0x36, - 0x3a, 0x34, 0x72, 0x93, 0x88, 0xc7, 0x93, 0x3b, 0x35, 0xec, 0x26, 0xa6, 0x86, 0xcf, 0x2f, 0xc0, - 0x39, 0x7f, 0x60, 0xb8, 0x0b, 0x2a, 0x81, 0xbb, 0x58, 0x95, 0x94, 0x2e, 0xaa, 0x92, 0x8d, 0xdf, - 0x17, 0xc0, 0x4c, 0xc4, 0xc5, 0xe5, 0xac, 0xd9, 0x71, 0x47, 0x88, 0x06, 0x2b, 0x43, 0x9b, 0xe3, - 0x6c, 0x44, 0xf6, 0x49, 0x85, 0xc7, 0xdf, 0xc2, 0xee, 0x9d, 0xe6, 0x1a, 0x0f, 0x40, 0x95, 0x62, - 0xbb, 0x4f, 0xa8, 0x2f, 0xe3, 0x07, 0x56, 0x09, 0x99, 0xfe, 0x61, 0x4c, 0x8a, 0x12, 0xda, 0x2b, - 0xf7, 0xc1, 0x6c, 0xcc, 0xd9, 0xa5, 0x48, 0xd8, 0x1b, 0x76, 0x38, 0x61, 0x72, 0x5e, 0x41, 0x76, - 0x3d, 0x89, 0x65, 0xd7, 0x7a, 0xfe, 0x61, 0x46, 0x9e, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 0x8e, 0x7d, - 0x35, 0x16, 0xda, 0xf9, 0x99, 0xf6, 0x27, 0x09, 0xcc, 0x45, 0xb4, 0xaf, 0x60, 0x82, 0xd9, 0x89, - 0x4f, 0x30, 0x9f, 0x8f, 0xb3, 0x89, 0x9c, 0x11, 0xe6, 0xaf, 0xa5, 0x58, 0xf0, 0xff, 0xf7, 0xa4, - 0xfa, 0x57, 0x60, 0x71, 0x60, 0x6a, 0xae, 0x4e, 0xda, 0x1a, 0x56, 0x75, 0x5f, 0x81, 0x31, 0x98, - 0xc9, 0xe4, 0x1f, 0x15, 0x01, 0x3c, 0xb1, 0x1d, 0xd5, 0xa1, 0xc4, 0xa0, 0xcf, 0x42, 0xcb, 0xd6, - 0x77, 0x85, 0x93, 0xc5, 0x67, 0x19, 0x70, 0x28, 0xd3, 0x09, 0xfc, 0x01, 0x98, 0x61, 0x04, 0x4e, - 0x55, 0x08, 0x9b, 0x05, 0xc5, 0xf4, 0xbf, 0x20, 0x80, 0x66, 0x3a, 0xa1, 0x08, 0x45, 0xf5, 0xe0, - 0x31, 0x58, 0xb0, 0xcc, 0xde, 0x1e, 0x36, 0x70, 0x9f, 0xb0, 0xb6, 0x77, 0xc0, 0xff, 0xd0, 0xe4, - 0x4c, 0xbb, 0xd2, 0xba, 0xed, 0x33, 0xa5, 0x83, 0xb4, 0xca, 0x47, 0x46, 0x59, 0xd3, 0xcb, 0x9c, - 0x07, 0x64, 0x41, 0x42, 0x1b, 0x54, 0x5d, 0xd1, 0x7e, 0xc4, 0xe0, 0xe1, 0xcd, 0xff, 0x9b, 0xe3, - 0x64, 0xd8, 0x51, 0xcc, 0x32, 0xac, 0x46, 0xf1, 0x75, 0x94, 0xf0, 0x90, 0x3b, 0x48, 0x94, 0xff, - 0x9b, 0x41, 0xa2, 0xf1, 0x9b, 0x22, 0x98, 0x4f, 0x3d, 0x5d, 0xf8, 0xa3, 0x73, 0x18, 0xf7, 0xd2, - 0xff, 0x8c, 0x6d, 0xa7, 0x08, 0xe3, 0xe4, 0x25, 0x08, 0xe3, 0x16, 0x98, 0x53, 0x5c, 0xdb, 0x66, - 0xb3, 0x7e, 0x9c, 0x65, 0x07, 0x54, 0xbd, 0x1d, 0x17, 0xa3, 0xa4, 0x7e, 0x16, 0xdb, 0x2f, 0x5d, - 0x92, 0xed, 0x47, 0xa3, 0x10, 0x8c, 0xcd, 0x4b, 0xbb, 0x74, 0x14, 0x82, 0xb8, 0x25, 0xf5, 0x59, - 0xb7, 0xf2, 0x50, 0x03, 0x84, 0xe9, 0x78, 0xb7, 0x3a, 0x8a, 0x49, 0x51, 0x42, 0x3b, 0x83, 0x39, - 0x57, 0xc6, 0x66, 0xce, 0x7f, 0x93, 0xc0, 0xa7, 0xb9, 0x19, 0x0a, 0xb7, 0x62, 0x04, 0x7a, 0x23, - 0x41, 0xa0, 0xbf, 0x97, 0x6b, 0x18, 0xe1, 0xd1, 0x76, 0x36, 0x8f, 0xbe, 0x3b, 0x1e, 0x8f, 0xce, - 0x20, 0x79, 0x17, 0x13, 0xea, 0xd6, 0xc6, 0xe9, 0xfb, 0xfa, 0xc4, 0xdb, 0xf7, 0xf5, 0x89, 0x77, - 0xef, 0xeb, 0x13, 0xbf, 0x1e, 0xd5, 0xa5, 0xd3, 0x51, 0x5d, 0x7a, 0x3b, 0xaa, 0x4b, 0xef, 0x46, - 0x75, 0xe9, 0x1f, 0xa3, 0xba, 0xf4, 0xbb, 0x0f, 0xf5, 0x89, 0xe7, 0xd3, 0xc2, 0xe3, 0x7f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0x3a, 0x2e, 0x29, 0xcd, 0xb9, 0x19, 0x00, 0x00, + // 1871 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x29, 0x61, 0x1b, + 0x24, 0x72, 0x12, 0x2d, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x08, 0x2a, 0xca, 0x6e, 0xe2, 0x40, 0xaa, + 0x94, 0xa1, 0x94, 0xa2, 0x69, 0x0b, 0x64, 0xb8, 0x1c, 0xd3, 0x1b, 0xed, 0x3f, 0xec, 0x0e, 0x59, + 0x13, 0xbd, 0xf4, 0x03, 0x14, 0x48, 0xcf, 0xfd, 0x14, 0x3d, 0x16, 0xed, 0xad, 0x27, 0x5f, 0x0a, + 
0x04, 0xbd, 0x34, 0x27, 0xa1, 0xa6, 0xaf, 0x6d, 0x6f, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, 0xec, + 0xff, 0x5d, 0x89, 0x2a, 0x20, 0x1d, 0x72, 0xe3, 0xce, 0x7b, 0xef, 0xf7, 0xde, 0xcc, 0xbc, 0xf7, + 0xe6, 0xfd, 0x08, 0x3f, 0x3a, 0x7d, 0xdf, 0xd7, 0x0c, 0xa7, 0x7d, 0x3a, 0xec, 0x51, 0xcf, 0xa6, + 0x8c, 0xfa, 0xed, 0x11, 0xb5, 0xfb, 0x8e, 0xd7, 0x96, 0x02, 0xe2, 0x1a, 0x6d, 0xe2, 0xba, 0x7e, + 0x7b, 0x74, 0xa7, 0x47, 0x19, 0xb9, 0xd3, 0x1e, 0x50, 0x9b, 0x7a, 0x84, 0xd1, 0xbe, 0xe6, 0x7a, + 0x0e, 0x73, 0xd0, 0x5a, 0xa0, 0xa8, 0x11, 0xd7, 0xd0, 0xb8, 0xa2, 0x26, 0x15, 0xd7, 0xb7, 0x07, + 0x06, 0x7b, 0x3c, 0xec, 0x69, 0xba, 0x63, 0xb5, 0x07, 0xce, 0xc0, 0x69, 0x0b, 0xfd, 0xde, 0xf0, + 0x91, 0xf8, 0x12, 0x1f, 0xe2, 0x57, 0x80, 0xb3, 0xae, 0x26, 0x1c, 0xea, 0x8e, 0x47, 0xdb, 0xa3, + 0x9c, 0xaf, 0xf5, 0xdb, 0x09, 0x1d, 0xd7, 0x31, 0x0d, 0x7d, 0x5c, 0x16, 0xd6, 0xfa, 0xbb, 0xb1, + 0xaa, 0x45, 0xf4, 0xc7, 0x86, 0x4d, 0xbd, 0x71, 0xdb, 0x3d, 0x1d, 0xf0, 0x05, 0xbf, 0x6d, 0x51, + 0x46, 0x8a, 0x1c, 0xb4, 0xcb, 0xac, 0xbc, 0xa1, 0xcd, 0x0c, 0x8b, 0xe6, 0x0c, 0xde, 0xbb, 0xc8, + 0xc0, 0xd7, 0x1f, 0x53, 0x8b, 0xe4, 0xec, 0xde, 0x29, 0xb3, 0x1b, 0x32, 0xc3, 0x6c, 0x1b, 0x36, + 0xf3, 0x99, 0x97, 0x35, 0x52, 0xff, 0xa3, 0x00, 0xda, 0x73, 0x6c, 0xe6, 0x39, 0xa6, 0x49, 0x3d, + 0x4c, 0x47, 0x86, 0x6f, 0x38, 0x36, 0xfa, 0x02, 0xea, 0x7c, 0x3f, 0x7d, 0xc2, 0x48, 0x53, 0xd9, + 0x54, 0xb6, 0x16, 0x77, 0xde, 0xd6, 0xe2, 0x4b, 0x89, 0xe0, 0x35, 0xf7, 0x74, 0xc0, 0x17, 0x7c, + 0x8d, 0x6b, 0x6b, 0xa3, 0x3b, 0xda, 0x61, 0xef, 0x4b, 0xaa, 0xb3, 0x03, 0xca, 0x48, 0x07, 0x3d, + 0x3d, 0xdb, 0x98, 0x99, 0x9c, 0x6d, 0x40, 0xbc, 0x86, 0x23, 0x54, 0x74, 0x08, 0x55, 0x81, 0x5e, + 0x11, 0xe8, 0xdb, 0xa5, 0xe8, 0x72, 0xd3, 0x1a, 0x26, 0xbf, 0x7a, 0xf0, 0x84, 0x51, 0x9b, 0x87, + 0xd7, 0x79, 0x49, 0x42, 0x57, 0xef, 0x13, 0x46, 0xb0, 0x00, 0x42, 0x6f, 0x41, 0xdd, 0x93, 0xe1, + 0x37, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xce, 0x0d, 0xa9, 0x55, 0x0f, 0xb7, 0x85, 0x23, 0x0d, 0xf5, + 0xa9, 0x02, 0xb7, 0xf2, 0xfb, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0xdb, 0xbb, 0x36, 0xdd, 0xde, + 0xb9, 0xb5, 0xd8, 0x79, 0xe4, 0x38, 0x5c, 0x49, 0xec, 0xfb, 0x08, 0x6a, 0x06, 0xa3, 0x96, 0xdf, + 0xac, 0x6c, 0xce, 0x6e, 0x2d, 0xee, 0xbc, 0xa9, 0x95, 0xe4, 0xba, 0x96, 0x8f, 0xae, 0xb3, 0x24, + 0x71, 0x6b, 0x0f, 0x39, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0x56, 0x00, 0xee, 0x53, 0xd7, 0x74, 0xc6, + 0x16, 0xb5, 0xd9, 0x35, 0x5c, 0xdd, 0x43, 0xa8, 0xfa, 0x2e, 0xd5, 0xe5, 0xd5, 0xbd, 0x5e, 0xba, + 0x83, 0x38, 0xa8, 0xae, 0x4b, 0xf5, 0xf8, 0xd2, 0xf8, 0x17, 0x16, 0x10, 0xe8, 0x53, 0x98, 0xf3, + 0x19, 0x61, 0x43, 0x5f, 0x5c, 0xd9, 0xe2, 0xce, 0xed, 0x69, 0xc0, 0x84, 0x41, 0xa7, 0x21, 0xe1, + 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xd4, 0xbf, 0xcf, 0xc2, 0x4a, 0xac, 0xbc, 0xe7, 0xd8, 0x7d, 0x83, + 0xf1, 0x94, 0xbe, 0x07, 0x55, 0x36, 0x76, 0xa9, 0x38, 0x93, 0x85, 0xce, 0xeb, 0x61, 0x30, 0xc7, + 0x63, 0x97, 0xbe, 0x38, 0xdb, 0x58, 0x2b, 0x30, 0xe1, 0x22, 0x2c, 0x8c, 0xd0, 0x7e, 0x14, 0x67, + 0x45, 0x98, 0xbf, 0x9b, 0x76, 0xfe, 0xe2, 0x6c, 0xa3, 0xa0, 0xd7, 0x68, 0x11, 0x52, 0x3a, 0x44, + 0xf4, 0x1a, 0xcc, 0x79, 0x94, 0xf8, 0x8e, 0xdd, 0xac, 0x0a, 0xb4, 0x68, 0x2b, 0x58, 0xac, 0x62, + 0x29, 0x45, 0xb7, 0x61, 0xde, 0xa2, 0xbe, 0x4f, 0x06, 0xb4, 0x59, 0x13, 0x8a, 0xcb, 0x52, 0x71, + 0xfe, 0x20, 0x58, 0xc6, 0xa1, 0x1c, 0x7d, 0x09, 0x0d, 0x93, 0xf8, 0xec, 0xc4, 0xed, 0x13, 0x46, + 0x8f, 0x0d, 0x8b, 0x36, 0xe7, 0xc4, 0x81, 0xbe, 0x31, 0xdd, 0xdd, 0x73, 0x8b, 0xce, 0x2d, 0x89, + 0xde, 0xd8, 0x4f, 0x21, 0xe1, 0x0c, 0x32, 0x1a, 0x01, 0xe2, 0x2b, 0xc7, 0x1e, 0xb1, 0xfd, 0xe0, + 0xa0, 0xb8, 0xbf, 0xf9, 
0x4b, 0xfb, 0x5b, 0x97, 0xfe, 0xd0, 0x7e, 0x0e, 0x0d, 0x17, 0x78, 0x50, + 0xff, 0xa8, 0x40, 0x23, 0xbe, 0xa6, 0x6b, 0xa8, 0xd5, 0x8f, 0xd3, 0xb5, 0xfa, 0xfd, 0x29, 0x92, + 0xb3, 0xa4, 0x46, 0xff, 0x59, 0x01, 0x14, 0x2b, 0x61, 0xc7, 0x34, 0x7b, 0x44, 0x3f, 0x45, 0x9b, + 0x50, 0xb5, 0x89, 0x15, 0xe6, 0x64, 0x54, 0x20, 0x3f, 0x21, 0x16, 0xc5, 0x42, 0x82, 0xbe, 0x52, + 0x00, 0x0d, 0xc5, 0xd1, 0xf7, 0x77, 0x6d, 0xdb, 0x61, 0x84, 0x9f, 0x46, 0x18, 0xd0, 0xde, 0x14, + 0x01, 0x85, 0xbe, 0xb4, 0x93, 0x1c, 0xca, 0x03, 0x9b, 0x79, 0xe3, 0xf8, 0x16, 0xf2, 0x0a, 0xb8, + 0xc0, 0x35, 0xfa, 0x39, 0x80, 0x27, 0x31, 0x8f, 0x1d, 0x59, 0xb6, 0xe5, 0x3d, 0x20, 0x74, 0xbf, + 0xe7, 0xd8, 0x8f, 0x8c, 0x41, 0xdc, 0x58, 0x70, 0x04, 0x81, 0x13, 0x70, 0xeb, 0x0f, 0x60, 0xad, + 0x24, 0x4e, 0x74, 0x03, 0x66, 0x4f, 0xe9, 0x38, 0x38, 0x2a, 0xcc, 0x7f, 0xa2, 0x55, 0xa8, 0x8d, + 0x88, 0x39, 0xa4, 0x41, 0x4d, 0xe2, 0xe0, 0xe3, 0x6e, 0xe5, 0x7d, 0x45, 0xfd, 0x43, 0x2d, 0x99, + 0x29, 0xbc, 0xdf, 0xa0, 0x2d, 0xfe, 0x3c, 0xb8, 0xa6, 0xa1, 0x13, 0x5f, 0x60, 0xd4, 0x3a, 0x2f, + 0x05, 0x4f, 0x43, 0xb0, 0x86, 0x23, 0x29, 0xfa, 0x25, 0xd4, 0x7d, 0x6a, 0x52, 0x9d, 0x39, 0x9e, + 0x6c, 0x71, 0xef, 0x4c, 0x99, 0x53, 0xa4, 0x47, 0xcd, 0xae, 0x34, 0x0d, 0xe0, 0xc3, 0x2f, 0x1c, + 0x41, 0xa2, 0x4f, 0xa1, 0xce, 0xa8, 0xe5, 0x9a, 0x84, 0x51, 0x79, 0x7a, 0xa9, 0xbc, 0xe2, 0xbd, + 0x83, 0x83, 0x1d, 0x39, 0xfd, 0x63, 0xa9, 0x26, 0xba, 0x67, 0x94, 0xa7, 0xe1, 0x2a, 0x8e, 0x60, + 0xd0, 0xcf, 0xa0, 0xee, 0x33, 0xfe, 0xaa, 0x0f, 0xc6, 0xa2, 0xa3, 0x9c, 0xf7, 0xac, 0x24, 0xfb, + 0x68, 0x60, 0x12, 0x43, 0x87, 0x2b, 0x38, 0x82, 0x43, 0xbb, 0xb0, 0x6c, 0x19, 0x36, 0xa6, 0xa4, + 0x3f, 0xee, 0x52, 0xdd, 0xb1, 0xfb, 0xbe, 0x68, 0x45, 0xb5, 0xce, 0x9a, 0x34, 0x5a, 0x3e, 0x48, + 0x8b, 0x71, 0x56, 0x1f, 0xed, 0xc3, 0x6a, 0xf8, 0xec, 0x7e, 0x6c, 0xf8, 0xcc, 0xf1, 0xc6, 0xfb, + 0x86, 0x65, 0x30, 0xd1, 0xa0, 0x6a, 0x9d, 0xe6, 0xe4, 0x6c, 0x63, 0x15, 0x17, 0xc8, 0x71, 0xa1, + 0x15, 0xef, 0x9d, 0x2e, 0x19, 0xfa, 0xb4, 0x2f, 0x1a, 0x4e, 0x3d, 0xee, 0x9d, 0x47, 0x62, 0x15, + 0x4b, 0x29, 0xfa, 0x69, 0x2a, 0x4d, 0xeb, 0x97, 0x4b, 0xd3, 0x46, 0x79, 0x8a, 0xa2, 0x13, 0x58, + 0x73, 0x3d, 0x67, 0xe0, 0x51, 0xdf, 0xbf, 0x4f, 0x49, 0xdf, 0x34, 0x6c, 0x1a, 0x9e, 0xcc, 0x82, + 0xd8, 0xd1, 0x2b, 0x93, 0xb3, 0x8d, 0xb5, 0xa3, 0x62, 0x15, 0x5c, 0x66, 0xab, 0xfe, 0xa5, 0x0a, + 0x37, 0xb2, 0x6f, 0x1c, 0xfa, 0x04, 0x90, 0xd3, 0xf3, 0xa9, 0x37, 0xa2, 0xfd, 0x8f, 0x82, 0xc1, + 0x8d, 0x4f, 0x37, 0x8a, 0x98, 0x6e, 0xa2, 0xba, 0x3d, 0xcc, 0x69, 0xe0, 0x02, 0xab, 0x60, 0x3e, + 0x92, 0x05, 0x50, 0x11, 0x81, 0x26, 0xe6, 0xa3, 0x5c, 0x11, 0xec, 0xc2, 0xb2, 0xac, 0xfd, 0x50, + 0x28, 0x92, 0x35, 0x71, 0xef, 0x27, 0x69, 0x31, 0xce, 0xea, 0xa3, 0x8f, 0xe0, 0x26, 0x19, 0x11, + 0xc3, 0x24, 0x3d, 0x93, 0x46, 0x20, 0x55, 0x01, 0xf2, 0xb2, 0x04, 0xb9, 0xb9, 0x9b, 0x55, 0xc0, + 0x79, 0x1b, 0x74, 0x00, 0x2b, 0x43, 0x3b, 0x0f, 0x15, 0xe4, 0xe1, 0x2b, 0x12, 0x6a, 0xe5, 0x24, + 0xaf, 0x82, 0x8b, 0xec, 0xd0, 0x17, 0x00, 0x7a, 0xf8, 0x30, 0xfb, 0xcd, 0x39, 0xd1, 0x49, 0xdf, + 0x9a, 0xa2, 0x5e, 0xa2, 0xd7, 0x3c, 0xee, 0x62, 0xd1, 0x92, 0x8f, 0x13, 0x98, 0xe8, 0x1e, 0x2c, + 0x79, 0xbc, 0x02, 0xa2, 0x50, 0xe7, 0x45, 0xa8, 0xdf, 0x91, 0x66, 0x4b, 0x38, 0x29, 0xc4, 0x69, + 0x5d, 0x74, 0x17, 0x1a, 0xba, 0x63, 0x9a, 0x22, 0xf3, 0xf7, 0x9c, 0xa1, 0xcd, 0x44, 0xf2, 0xd6, + 0x3a, 0x88, 0xbf, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, 0xcf, 0x4c, 0x58, + 0xce, 0xe8, 0x6e, 0x6a, 0xf4, 0x79, 0x2d, 0x33, 0xfa, 0xdc, 0xca, 0x5b, 0x24, 0x26, 0x1f, 0x03, + 0x96, 0x78, 0xf2, 0x1b, 0xf6, 0x20, 0xb8, 0x70, 
0xd9, 0x12, 0xdf, 0x3e, 0xb7, 0x94, 0x22, 0xed, + 0xc4, 0xc3, 0x78, 0x53, 0xec, 0x3c, 0x29, 0xc4, 0x69, 0x64, 0xf5, 0x43, 0x68, 0xa4, 0xeb, 0x30, + 0x35, 0xd3, 0x2b, 0x17, 0xce, 0xf4, 0xcf, 0x15, 0x58, 0x2b, 0xf1, 0x8e, 0x4c, 0x68, 0x58, 0xe4, + 0x49, 0x22, 0x47, 0x2e, 0x9c, 0x8d, 0x39, 0x6b, 0xd2, 0x02, 0xd6, 0xa4, 0x3d, 0xb4, 0xd9, 0xa1, + 0xd7, 0x65, 0x9e, 0x61, 0x0f, 0x82, 0x7b, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, 0xfa, 0x1c, 0xea, + 0x16, 0x79, 0xd2, 0x1d, 0x7a, 0x83, 0xa2, 0xf3, 0x9a, 0xce, 0x8f, 0x78, 0x3f, 0x0e, 0x24, 0x0a, + 0x8e, 0xf0, 0xd4, 0x43, 0xd8, 0x4c, 0x6d, 0x92, 0xb7, 0x0a, 0xfa, 0x68, 0x68, 0x76, 0x69, 0x7c, + 0xe1, 0x6f, 0xc2, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xed, 0xa2, 0xd6, 0x59, 0x9a, 0x9c, 0x6d, 0x2c, + 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0xd6, 0xd5, 0x89, 0x49, 0xaf, 0x81, 0x3a, + 0xdc, 0x4f, 0x51, 0x07, 0xb5, 0x34, 0x89, 0x44, 0x3c, 0xa5, 0xac, 0x61, 0x3f, 0xc3, 0x1a, 0x5e, + 0xbd, 0x00, 0xe7, 0x7c, 0xc2, 0xf0, 0x01, 0x2c, 0x44, 0xee, 0x52, 0x5d, 0x52, 0xb9, 0xa8, 0x4b, + 0xaa, 0xbf, 0xaf, 0xc0, 0x62, 0xc2, 0xc5, 0xe5, 0xac, 0xf9, 0x71, 0x27, 0x06, 0x0d, 0xde, 0x86, + 0x76, 0xa6, 0xd9, 0x88, 0x16, 0x0e, 0x15, 0xc1, 0xfc, 0x16, 0xbf, 0xde, 0xf9, 0x59, 0xe3, 0x43, + 0x68, 0x30, 0xe2, 0x0d, 0x28, 0x0b, 0x65, 0xe2, 0xc0, 0x16, 0xe2, 0x49, 0xff, 0x38, 0x25, 0xc5, + 0x19, 0xed, 0xf5, 0x7b, 0xb0, 0x94, 0x72, 0x76, 0xa9, 0x21, 0xec, 0x2b, 0x7e, 0x38, 0x71, 0x72, + 0x5e, 0x43, 0x76, 0x7d, 0x92, 0xca, 0xae, 0xad, 0xf2, 0xc3, 0x4c, 0x94, 0x4c, 0x59, 0x8e, 0xe1, + 0x4c, 0x8e, 0xbd, 0x31, 0x15, 0xda, 0xf9, 0x99, 0xf6, 0xaf, 0x0a, 0xac, 0x26, 0xb4, 0x63, 0x6e, + 0xfa, 0xc3, 0x54, 0x83, 0xde, 0xca, 0x34, 0xe8, 0x66, 0x91, 0xcd, 0x95, 0x91, 0xd3, 0x62, 0x76, + 0x37, 0x7b, 0xd5, 0xec, 0xee, 0x0a, 0x48, 0xb1, 0xfa, 0x27, 0x05, 0x96, 0x13, 0x67, 0x77, 0x0d, + 0x8c, 0xf1, 0x61, 0x9a, 0x31, 0xbe, 0x3a, 0x4d, 0xd2, 0x94, 0x50, 0xc6, 0xbf, 0xd6, 0x52, 0xc1, + 0x7f, 0xeb, 0x49, 0xcc, 0xaf, 0x61, 0x75, 0xe4, 0x98, 0x43, 0x8b, 0xee, 0x99, 0xc4, 0xb0, 0x42, + 0x05, 0x3e, 0x31, 0xce, 0x66, 0xff, 0x18, 0x8a, 0xe0, 0xa9, 0xe7, 0x1b, 0x3e, 0xa3, 0x36, 0xfb, + 0x2c, 0xb6, 0xec, 0x7c, 0x57, 0x3a, 0x59, 0xfd, 0xac, 0x00, 0x0e, 0x17, 0x3a, 0x41, 0x3f, 0x80, + 0x45, 0x3e, 0x30, 0x1b, 0x3a, 0xe5, 0xdc, 0x5b, 0x26, 0xd6, 0x8a, 0x04, 0x5a, 0xec, 0xc6, 0x22, + 0x9c, 0xd4, 0x43, 0x8f, 0x61, 0xc5, 0x75, 0xfa, 0x07, 0xc4, 0x26, 0x03, 0xca, 0xc7, 0x8c, 0x23, + 0xf1, 0x07, 0xb2, 0x60, 0x36, 0x0b, 0x9d, 0xf7, 0xc2, 0xc9, 0xf4, 0x28, 0xaf, 0xf2, 0x82, 0x53, + 0x84, 0xfc, 0xb2, 0x28, 0xea, 0x22, 0x48, 0xe4, 0x41, 0x63, 0x28, 0x9f, 0x7b, 0x49, 0xf4, 0x82, + 0xff, 0x5b, 0x76, 0xa6, 0xc9, 0xb0, 0x93, 0x94, 0x65, 0xdc, 0xfd, 0xd3, 0xeb, 0x38, 0xe3, 0xa1, + 0x94, 0xb8, 0xd5, 0xff, 0x1f, 0xe2, 0xa6, 0xfe, 0xbb, 0x0a, 0x37, 0x73, 0xad, 0x12, 0xfd, 0xf8, + 0x1c, 0x86, 0x73, 0xeb, 0xca, 0xd8, 0x4d, 0x6e, 0x40, 0x9f, 0xbd, 0xc4, 0x80, 0xbe, 0x0b, 0xcb, + 0xfa, 0xd0, 0xf3, 0xa8, 0xcd, 0x32, 0xac, 0x26, 0xa2, 0x46, 0x7b, 0x69, 0x31, 0xce, 0xea, 0x17, + 0xb1, 0xab, 0xda, 0x25, 0xd9, 0x55, 0x32, 0x0a, 0x39, 0x21, 0x07, 0x69, 0x97, 0x8f, 0x42, 0x0e, + 0xca, 0x59, 0x7d, 0x3e, 0x1d, 0x04, 0xa8, 0x11, 0xc2, 0x7c, 0x7a, 0x3a, 0x38, 0x49, 0x49, 0x71, + 0x46, 0xbb, 0x80, 0xa9, 0x2c, 0x4c, 0xcb, 0x54, 0x10, 0x49, 0x91, 0x30, 0x10, 0x35, 0xbe, 0x3d, + 0x4d, 0x2e, 0x4f, 0xcd, 0xc2, 0xd4, 0xbf, 0x29, 0xf0, 0x72, 0x69, 0x11, 0xa0, 0xdd, 0xd4, 0x93, + 0xbb, 0x9d, 0x79, 0x72, 0xbf, 0x57, 0x6a, 0x98, 0x78, 0x77, 0xbd, 0x62, 0x6a, 0xf4, 0xc1, 0x74, + 0xd4, 0xa8, 0x60, 0x6e, 0xbf, 0x98, 0x23, 0x75, 0xb6, 0x9f, 0x3e, 0x6b, 
0xcd, 0x7c, 0xfd, 0xac, + 0x35, 0xf3, 0xcd, 0xb3, 0xd6, 0xcc, 0x6f, 0x26, 0x2d, 0xe5, 0xe9, 0xa4, 0xa5, 0x7c, 0x3d, 0x69, + 0x29, 0xdf, 0x4c, 0x5a, 0xca, 0x3f, 0x26, 0x2d, 0xe5, 0x77, 0xcf, 0x5b, 0x33, 0x9f, 0xcf, 0x4b, + 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x5d, 0x9e, 0x04, 0x8c, 0x1b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 4fec5aad..68397a02 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -338,6 +338,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -442,6 +463,12 @@ message StatefulSetStatus { // newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -454,3 +481,4 @@ message StatefulSetUpdateStrategy { // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; } + diff --git a/vendor/k8s.io/api/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go index 84789be6..5b16819f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/register.go +++ b/vendor/k8s.io/api/apps/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 63d1c725..dd9e97e1 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -26,6 +26,7 @@ import ( const ( ControllerRevisionHashLabelKey = "controller-revision-hash" StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -247,6 +248,31 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 72d5e69c..d12baf39 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -206,6 +206,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -240,6 +253,7 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 2f046b02..cad744ce 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -23,105 +23,10 @@ package v1beta1 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, 
InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -591,6 +496,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -698,6 +620,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index dafca120..e93e164e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 07d95d8a..2572ab08 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -28,6 +28,7 @@ limitations under the License. ControllerRevision ControllerRevisionList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -50,6 +51,7 @@ limitations under the License. ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus @@ -97,120 +99,129 @@ func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{8} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } 
func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptorGenerated, []int{21} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{28} + return fileDescriptorGenerated, []int{30} } func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus") @@ -233,6 +244,7 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), 
"k8s.io.api.apps.v1beta2.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") @@ -355,6 +367,48 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -373,11 +427,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -412,28 +466,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n10, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -489,6 +543,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -515,11 +581,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -542,27 +608,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n13 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 return i, nil } @@ -600,19 +666,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 return i, nil } @@ -634,11 +700,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -678,28 +744,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -800,11 +866,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil 
{ return 0, err } - i += n21 + i += n22 } return i, nil } @@ -827,27 +893,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n22, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n23, err := m.Spec.MarshalTo(dAtA[i:]) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n23 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n24, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 return i, nil } @@ -877,11 +943,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n25, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -911,11 +977,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n26, err := m.ListMeta.MarshalTo(dAtA[i:]) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -955,20 +1021,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n27, err := m.Selector.MarshalTo(dAtA[i:]) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n28, err := m.Template.MarshalTo(dAtA[i:]) + n29, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1039,11 +1105,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n29, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 } return i, nil } @@ -1067,21 +1133,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n31, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } return i, nil } @@ -1127,27 +1193,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, 
err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n33 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n34, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 return i, nil } @@ -1237,27 +1303,69 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n35, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n36, err := m.Spec.MarshalTo(dAtA[i:]) + n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n36 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n37, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n38, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } @@ -1279,11 +1387,11 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n38, err := m.ListMeta.MarshalTo(dAtA[i:]) + n40, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n40 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1323,20 +1431,20 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n39, err := m.Selector.MarshalTo(dAtA[i:]) + n41, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n40, err := m.Template.MarshalTo(dAtA[i:]) 
+ n42, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n42 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 @@ -1360,11 +1468,11 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n41, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n43 if m.RevisionHistoryLimit != nil { dAtA[i] = 0x40 i++ @@ -1416,6 +1524,18 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1442,11 +1562,11 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n42, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n44 } return i, nil } @@ -1515,6 +1635,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -1561,6 +1697,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1834,6 +1976,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1893,6 +2051,12 @@ func (m *StatefulSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1956,6 +2120,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + 
`,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -1995,6 +2173,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2245,6 +2424,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetList) String() string { if this == nil { return "nil" @@ -2286,6 +2479,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2689,6 +2883,202 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3183,6 +3573,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5969,6 +6390,202 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } 
return nil } +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -6554,6 +7171,37 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6797,138 +7445,142 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2119 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0xda, 0xa5, 0x2c, 0xc9, 0xa6, 0x54, 0x49, 0x91, 0xdb, 0x95, 0xb0, 0x09, - 0x1c, 0x39, 0xb6, 0x67, 0x6d, 0xe5, 0x81, 0xc4, 0x06, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xc9, 0x40, 0x83, 0x16, 0x30, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0xe9, 0xb9, 0xff, 0x44, 0x7b, 0x2a, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x8e, 0x3d, 0xf7, 0x12, 0xa0, 0x40, 0x41, - 0x0e, 0xe7, 0xc1, 0x79, 0x58, 0x23, 0x25, 0xde, 0x14, 0xb9, 0x69, 0xf9, 0xfd, 0xbe, 0x1f, 0x3f, - 0x0e, 0x3f, 0x7e, 0xdf, 0x6f, 0x38, 0x02, 0x3f, 0x3e, 0x79, 0xd7, 0x55, 0x35, 0xab, 0x75, 0xe2, - 0x1d, 0x11, 0xc7, 0x24, 0x94, 0xb8, 0xad, 0x3e, 0x31, 0xbb, 0x96, 0xd3, 0x12, 0x06, 0x6c, 0x6b, - 0x2d, 0x6c, 0xdb, 0x6e, 0xab, 0x7f, 0xe7, 0x88, 0x50, 0xbc, 0xd6, 0xea, 0x11, 0x93, 0x38, 0x98, - 0x92, 0xae, 0x6a, 0x3b, 0x16, 0xb5, 0xe0, 0x82, 0x0f, 0x54, 0xb1, 0xad, 0xa9, 0x0c, 0xa8, 0x0a, - 0xe0, 0xd2, 0xad, 0x9e, 0x46, 0x8f, 0xbd, 0x23, 0xb5, 0x63, 0x19, 0xad, 0x9e, 0xd5, 0xb3, 0x5a, - 0x1c, 0x7f, 0xe4, 0x3d, 0xe1, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0xcf, 0xb3, 0xd4, 0x8c, 0x4d, 0xd8, - 0xb1, 0x1c, 0xd2, 0xea, 0xdf, 0x49, 0xce, 0xb5, 0x74, 0x3d, 0x86, 0xb1, 0x2d, 0x5d, 0xeb, 0x0c, - 0x44, 0x58, 0x69, 0xe8, 0x5b, 0x11, 0xd4, 0xc0, 0x9d, 0x63, 0xcd, 0x24, 0xce, 0xa0, 0x65, 0x9f, - 0xf4, 0xd8, 0x80, 0xdb, 0x32, 0x08, 0xc5, 0x59, 0x13, 0xb4, 0xf2, 0xbc, 0x1c, 0xcf, 0xa4, 0x9a, - 0x41, 0x52, 0x0e, 0xef, 0x9c, 0xe5, 0xe0, 0x76, 0x8e, 0x89, 0x81, 0x53, 0x7e, 0x6f, 0xe6, 0xf9, - 0x79, 0x54, 0xd3, 0x5b, 0x9a, 0x49, 0x5d, 0xea, 0x24, 0x9d, 0x9a, 0xff, 
0x51, 0x00, 0xdc, 0xb0, - 0x4c, 0xea, 0x58, 0xba, 0x4e, 0x1c, 0x44, 0xfa, 0x9a, 0xab, 0x59, 0x26, 0x7c, 0x0c, 0x6a, 0x6c, - 0x3d, 0x5d, 0x4c, 0xf1, 0xa2, 0xb2, 0xa2, 0xac, 0x4e, 0xae, 0xdd, 0x56, 0xa3, 0x4d, 0x09, 0xe9, - 0x55, 0xfb, 0xa4, 0xc7, 0x06, 0x5c, 0x95, 0xa1, 0xd5, 0xfe, 0x1d, 0x75, 0xf7, 0xe8, 0x63, 0xd2, - 0xa1, 0xdb, 0x84, 0xe2, 0x36, 0x7c, 0x76, 0xba, 0x3c, 0x36, 0x3c, 0x5d, 0x06, 0xd1, 0x18, 0x0a, - 0x59, 0xe1, 0x2e, 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x95, 0xcb, 0x2e, 0x16, 0xad, 0x22, 0xfc, - 0x8b, 0xf7, 0x9f, 0x52, 0x62, 0xb2, 0xf0, 0xda, 0x97, 0x04, 0x75, 0x65, 0x13, 0x53, 0x8c, 0x38, - 0x11, 0xbc, 0x09, 0x6a, 0x8e, 0x08, 0x7f, 0xb1, 0xbc, 0xa2, 0xac, 0x96, 0xdb, 0x97, 0x05, 0xaa, - 0x16, 0x2c, 0x0b, 0x85, 0x88, 0xe6, 0x33, 0x05, 0xcc, 0xa7, 0xd7, 0xbd, 0xa5, 0xb9, 0x14, 0xfe, - 0x2c, 0xb5, 0x76, 0xb5, 0xd8, 0xda, 0x99, 0x37, 0x5f, 0x79, 0x38, 0x71, 0x30, 0x12, 0x5b, 0xf7, - 0x1e, 0xa8, 0x6a, 0x94, 0x18, 0xee, 0x62, 0x69, 0xa5, 0xbc, 0x3a, 0xb9, 0x76, 0x43, 0xcd, 0xc9, - 0x75, 0x35, 0x1d, 0x5d, 0x7b, 0x4a, 0xf0, 0x56, 0x1f, 0x32, 0x06, 0xe4, 0x13, 0x35, 0x7f, 0x53, - 0x02, 0xf5, 0x4d, 0x4c, 0x0c, 0xcb, 0xdc, 0x27, 0x74, 0x04, 0x3b, 0xf7, 0x00, 0x54, 0x5c, 0x9b, - 0x74, 0xc4, 0xce, 0x5d, 0xcb, 0x5d, 0x40, 0x18, 0xd3, 0xbe, 0x4d, 0x3a, 0xd1, 0x96, 0xb1, 0x5f, - 0x88, 0x33, 0xc0, 0x3d, 0x30, 0xee, 0x52, 0x4c, 0x3d, 0x97, 0x6f, 0xd8, 0xe4, 0xda, 0x6a, 0x01, - 0x2e, 0x8e, 0x6f, 0x4f, 0x0b, 0xb6, 0x71, 0xff, 0x37, 0x12, 0x3c, 0xcd, 0x3f, 0x29, 0x60, 0x2a, - 0xc4, 0x8e, 0x60, 0x37, 0xef, 0xcb, 0xbb, 0xd9, 0x3c, 0x7b, 0x01, 0x39, 0x9b, 0xf8, 0x69, 0x39, - 0x16, 0x38, 0x7b, 0x44, 0xf0, 0xe7, 0xa0, 0xe6, 0x12, 0x9d, 0x74, 0xa8, 0xe5, 0x88, 0xc0, 0xdf, - 0x2c, 0x18, 0x38, 0x3e, 0x22, 0xfa, 0xbe, 0x70, 0x6d, 0x5f, 0x62, 0x91, 0x07, 0xbf, 0x50, 0x48, - 0x09, 0x3f, 0x04, 0x35, 0x4a, 0x0c, 0x5b, 0xc7, 0x94, 0x88, 0x9d, 0x7c, 0x35, 0x1e, 0x3c, 0x2b, - 0x97, 0x8c, 0x6c, 0xcf, 0xea, 0x1e, 0x08, 0x18, 0xdf, 0xc6, 0xf0, 0x61, 0x04, 0xa3, 0x28, 0xa4, - 0x81, 0x36, 0x98, 0xf6, 0xec, 0x2e, 0x43, 0x52, 0x56, 0x62, 0x7a, 0x03, 0xb1, 0xad, 0xb7, 0xcf, - 0x7e, 0x2a, 0x87, 0x92, 0x5f, 0x7b, 0x5e, 0xcc, 0x32, 0x2d, 0x8f, 0xa3, 0x04, 0x3f, 0x5c, 0x07, - 0x33, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, 0xd2, 0xb1, 0xcc, 0xae, 0xbb, 0x58, 0x59, 0x51, - 0x56, 0xab, 0xed, 0x05, 0x41, 0x30, 0xb3, 0x2d, 0x9b, 0x51, 0x12, 0x0f, 0xb7, 0xc0, 0x5c, 0x50, - 0x14, 0x1e, 0x68, 0x2e, 0xb5, 0x9c, 0xc1, 0x96, 0x66, 0x68, 0x74, 0x71, 0x9c, 0xf3, 0x2c, 0x0e, - 0x4f, 0x97, 0xe7, 0x50, 0x86, 0x1d, 0x65, 0x7a, 0x35, 0xff, 0x58, 0x05, 0x33, 0x89, 0x5c, 0x85, - 0x8f, 0xc0, 0x7c, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, 0x38, 0x22, 0xce, 0x7e, 0xe7, 0x98, - 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0x11, 0xeb, 0xfc, 0x46, 0x26, 0x0a, 0xe5, - 0x78, 0xc3, 0x0f, 0x00, 0x34, 0xf9, 0xd0, 0xb6, 0xe6, 0xba, 0x21, 0x67, 0x89, 0x73, 0x2e, 0x09, - 0x4e, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2f, 0x16, 0x63, 0x97, 0xb8, 0x9a, 0x43, 0xba, 0xc9, 0x18, - 0xcb, 0x72, 0x8c, 0x9b, 0x99, 0x28, 0x94, 0xe3, 0x0d, 0xdf, 0x06, 0x93, 0xfe, 0x6c, 0xfc, 0x99, - 0x8b, 0xcd, 0x99, 0x15, 0x64, 0x93, 0x3b, 0x91, 0x09, 0xc5, 0x71, 0x6c, 0x69, 0xd6, 0x91, 0x4b, - 0x9c, 0x3e, 0xe9, 0xde, 0xf7, 0x1b, 0x16, 0xab, 0xea, 0x55, 0x5e, 0xd5, 0xc3, 0xa5, 0xed, 0xa6, - 0x10, 0x28, 0xc3, 0x8b, 0x2d, 0xcd, 0xcf, 0x9a, 0xd4, 0xd2, 0xc6, 0xe5, 0xa5, 0x1d, 0x66, 0xa2, - 0x50, 0x8e, 0x37, 0xcb, 0x3d, 0x3f, 0xe4, 0xf5, 0x3e, 0xd6, 0x74, 0x7c, 0xa4, 0x93, 0xc5, 0x09, - 0x39, 0xf7, 0x76, 0x64, 0x33, 0x4a, 0xe2, 0xe1, 0x7d, 0x70, 0xc5, 0x1f, 0x3a, 0x34, 0x71, 0x48, - 
0x52, 0xe3, 0x24, 0xaf, 0x08, 0x92, 0x2b, 0x3b, 0x49, 0x00, 0x4a, 0xfb, 0xc0, 0xbb, 0x60, 0xba, - 0x63, 0xe9, 0x3a, 0xcf, 0xc7, 0x0d, 0xcb, 0x33, 0xe9, 0x62, 0x9d, 0xb3, 0x40, 0x76, 0x86, 0x36, - 0x24, 0x0b, 0x4a, 0x20, 0x9b, 0x9f, 0x2a, 0x60, 0x21, 0xe7, 0x1c, 0xc2, 0x1f, 0x81, 0x0a, 0x1d, - 0xd8, 0x84, 0x27, 0x6a, 0xbd, 0x7d, 0x23, 0x28, 0xe1, 0x07, 0x03, 0x9b, 0x7c, 0x75, 0xba, 0x7c, - 0x35, 0xc7, 0x8d, 0x99, 0x11, 0x77, 0x84, 0xc7, 0x60, 0x8a, 0xf5, 0x30, 0xcd, 0xec, 0xf9, 0x10, - 0x51, 0x6a, 0x5a, 0xb9, 0x15, 0x01, 0xc5, 0xd1, 0x51, 0xd1, 0xbc, 0x32, 0x3c, 0x5d, 0x9e, 0x92, - 0x6c, 0x48, 0x26, 0x6e, 0xfe, 0xb6, 0x04, 0xc0, 0x26, 0xb1, 0x75, 0x6b, 0x60, 0x10, 0x73, 0x14, - 0x6d, 0xf0, 0xa1, 0xd4, 0x06, 0x5f, 0xcf, 0xaf, 0x71, 0x61, 0x50, 0xb9, 0x7d, 0xf0, 0xc3, 0x44, - 0x1f, 0xbc, 0x5e, 0x84, 0xec, 0xc5, 0x8d, 0xf0, 0xf3, 0x32, 0x98, 0x8d, 0xc0, 0x1b, 0x96, 0xd9, - 0xd5, 0xf8, 0x69, 0xb8, 0x27, 0xed, 0xe8, 0xeb, 0x89, 0x1d, 0x5d, 0xc8, 0x70, 0x89, 0xed, 0xe6, - 0x56, 0x18, 0x67, 0x89, 0xbb, 0xbf, 0x25, 0x4f, 0xfe, 0xd5, 0xe9, 0x72, 0x86, 0xe2, 0x56, 0x43, - 0x26, 0x39, 0x44, 0x78, 0x0d, 0x8c, 0x3b, 0x04, 0xbb, 0x96, 0xc9, 0xcb, 0x42, 0x3d, 0x5a, 0x0a, - 0xe2, 0xa3, 0x48, 0x58, 0xe1, 0x75, 0x30, 0x61, 0x10, 0xd7, 0xc5, 0x3d, 0xc2, 0x2b, 0x40, 0xbd, - 0x3d, 0x23, 0x80, 0x13, 0xdb, 0xfe, 0x30, 0x0a, 0xec, 0xf0, 0x63, 0x30, 0xad, 0x63, 0x57, 0xa4, - 0xe3, 0x81, 0x66, 0x10, 0x7e, 0xc6, 0x27, 0xd7, 0xde, 0x28, 0xb6, 0xf7, 0xcc, 0x23, 0xea, 0x3d, - 0x5b, 0x12, 0x13, 0x4a, 0x30, 0xc3, 0x3e, 0x80, 0x6c, 0xe4, 0xc0, 0xc1, 0xa6, 0xeb, 0x3f, 0x28, - 0x36, 0xdf, 0xc4, 0xb9, 0xe7, 0x0b, 0xeb, 0xd9, 0x56, 0x8a, 0x0d, 0x65, 0xcc, 0xd0, 0xfc, 0xb3, - 0x02, 0xa6, 0xa3, 0x6d, 0x1a, 0x81, 0xc6, 0x79, 0x20, 0x6b, 0x9c, 0x57, 0x0b, 0x24, 0x67, 0x8e, - 0xc8, 0xf9, 0xbc, 0x12, 0x0f, 0x9d, 0xab, 0x9c, 0x55, 0xa6, 0xda, 0x6d, 0x5d, 0xeb, 0x60, 0x57, - 0xb4, 0xc3, 0x4b, 0xbe, 0x62, 0xf7, 0xc7, 0x50, 0x68, 0x95, 0xf4, 0x50, 0xe9, 0xe5, 0xea, 0xa1, - 0xf2, 0x37, 0xa3, 0x87, 0x7e, 0x0a, 0x6a, 0x6e, 0xa0, 0x84, 0x2a, 0x9c, 0xf2, 0x46, 0xa1, 0x83, - 0x2d, 0x44, 0x50, 0x48, 0x1d, 0xca, 0x9f, 0x90, 0x2e, 0x4b, 0xf8, 0x54, 0xbf, 0x4d, 0xe1, 0xc3, - 0x0e, 0xb3, 0x8d, 0x3d, 0x97, 0x74, 0xf9, 0x09, 0xa8, 0x45, 0x87, 0x79, 0x8f, 0x8f, 0x22, 0x61, - 0x85, 0x87, 0x60, 0xc1, 0x76, 0xac, 0x9e, 0x43, 0x5c, 0x77, 0x93, 0xe0, 0xae, 0xae, 0x99, 0x24, - 0x58, 0x80, 0xdf, 0xb2, 0xae, 0x0e, 0x4f, 0x97, 0x17, 0xf6, 0xb2, 0x21, 0x28, 0xcf, 0xb7, 0xf9, - 0xb7, 0x0a, 0xb8, 0x9c, 0xac, 0x8d, 0x39, 0x2a, 0x42, 0xb9, 0x90, 0x8a, 0xb8, 0x19, 0xcb, 0x53, - 0x5f, 0x62, 0xc5, 0xde, 0x2e, 0x53, 0xb9, 0xba, 0x0e, 0x66, 0x84, 0x6a, 0x08, 0x8c, 0x42, 0x47, - 0x85, 0xdb, 0x73, 0x28, 0x9b, 0x51, 0x12, 0xcf, 0xb4, 0x41, 0xd4, 0xf2, 0x03, 0x92, 0x8a, 0xac, - 0x0d, 0xd6, 0x93, 0x00, 0x94, 0xf6, 0x81, 0xdb, 0x60, 0xd6, 0x33, 0xd3, 0x54, 0x7e, 0xba, 0x5c, - 0x15, 0x54, 0xb3, 0x87, 0x69, 0x08, 0xca, 0xf2, 0x83, 0x8f, 0x01, 0xe8, 0x04, 0x05, 0xdd, 0x5d, - 0x1c, 0xe7, 0x25, 0xe1, 0x66, 0x81, 0xb4, 0x0e, 0xbb, 0x40, 0xd4, 0x56, 0xc3, 0x21, 0x17, 0xc5, - 0x38, 0xe1, 0x3d, 0x30, 0xe5, 0x70, 0x49, 0x18, 0x84, 0xea, 0xcb, 0xaa, 0xef, 0x09, 0xb7, 0x29, - 0x14, 0x37, 0x22, 0x19, 0x9b, 0xa1, 0x84, 0x6a, 0x85, 0x95, 0xd0, 0x5f, 0x15, 0x00, 0xd3, 0xe7, - 0x10, 0xde, 0x95, 0x5a, 0xe6, 0xb5, 0x44, 0xcb, 0x9c, 0x4f, 0x7b, 0xc4, 0x3a, 0xa6, 0x96, 0xad, - 0x7f, 0x6e, 0x17, 0xd4, 0x3f, 0x51, 0x41, 0x2d, 0x26, 0x80, 0xc4, 0x63, 0x18, 0xcd, 0x3d, 0x40, - 0x51, 0x01, 0x14, 0x05, 0xf5, 0x0d, 0x08, 0xa0, 0x18, 0xd9, 0x8b, 0x05, 0xd0, 0xbf, 0x4b, 0x60, - 0x36, 0x02, 0x17, 0x16, 
0x40, 0x19, 0x2e, 0x2f, 0x4d, 0x00, 0x65, 0x2b, 0x88, 0xf2, 0xcb, 0x56, - 0x10, 0x2f, 0x41, 0x78, 0x71, 0x51, 0x12, 0x3d, 0xba, 0xff, 0x27, 0x51, 0x12, 0x45, 0x95, 0x23, - 0x4a, 0xfe, 0x50, 0x8a, 0x87, 0xfe, 0x9d, 0x17, 0x25, 0x5f, 0xff, 0xca, 0xa4, 0xf9, 0xf7, 0x32, - 0xb8, 0x9c, 0x3c, 0x87, 0x52, 0x83, 0x54, 0xce, 0x6c, 0x90, 0x7b, 0x60, 0xee, 0x89, 0xa7, 0xeb, - 0x03, 0xfe, 0x18, 0x62, 0x5d, 0xd2, 0x6f, 0xad, 0xdf, 0x17, 0x9e, 0x73, 0x3f, 0xc9, 0xc0, 0xa0, - 0x4c, 0xcf, 0x9c, 0x66, 0x5f, 0xbe, 0x50, 0xb3, 0x4f, 0x75, 0xa0, 0xca, 0x39, 0x3a, 0x50, 0x66, - 0xe3, 0xae, 0x5e, 0xa0, 0x71, 0x9f, 0xaf, 0xd3, 0x66, 0x14, 0xae, 0xb3, 0x3a, 0x6d, 0xf3, 0xd7, - 0x0a, 0x98, 0xcf, 0x7e, 0xe1, 0x86, 0x3a, 0x98, 0x36, 0xf0, 0xd3, 0xf8, 0xbd, 0xc4, 0x59, 0x4d, - 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x5f, 0x19, 0xd4, 0x87, 0x26, 0xdd, 0x75, 0xf6, 0xa9, 0xa3, 0x99, - 0x3d, 0xbf, 0xf3, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x52, 0x01, 0x0b, 0x39, 0x9d, 0x6f, - 0xb4, 0x91, 0xc0, 0x8f, 0x40, 0xcd, 0xc0, 0x4f, 0xf7, 0x3d, 0xa7, 0x97, 0xd5, 0xab, 0x8b, 0xcd, - 0xc3, 0x4f, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0xac, 0x48, 0x8b, 0x64, 0x27, 0x87, - 0x3c, 0xf1, 0x74, 0x7e, 0x88, 0x84, 0xd8, 0xb8, 0x01, 0xea, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, - 0x6a, 0x7b, 0x6a, 0x78, 0xba, 0x5c, 0xdf, 0x0b, 0x06, 0x51, 0x64, 0x6f, 0xfe, 0x57, 0x01, 0xd5, - 0xfd, 0x0e, 0xd6, 0xc9, 0x08, 0xba, 0xfd, 0xa6, 0xd4, 0xed, 0xf3, 0x2f, 0xba, 0x79, 0x3c, 0xb9, - 0x8d, 0x7e, 0x2b, 0xd1, 0xe8, 0x5f, 0x3b, 0x83, 0xe7, 0xc5, 0x3d, 0xfe, 0x3d, 0x50, 0x0f, 0xa7, - 0x3b, 0x5f, 0x01, 0x6a, 0xfe, 0xbe, 0x04, 0x26, 0x63, 0x53, 0x9c, 0xb3, 0x7c, 0x3d, 0x96, 0xca, - 0x3e, 0x3b, 0x98, 0x6b, 0x45, 0x16, 0xa2, 0x06, 0x25, 0xfe, 0x7d, 0x93, 0x3a, 0xf1, 0x17, 0xbc, - 0x74, 0xe5, 0xff, 0x21, 0x98, 0xa6, 0xd8, 0xe9, 0x11, 0x1a, 0xd8, 0xf8, 0x03, 0xab, 0x47, 0xb7, - 0x13, 0x07, 0x92, 0x15, 0x25, 0xd0, 0x4b, 0xf7, 0xc0, 0x94, 0x34, 0x19, 0xbc, 0x0c, 0xca, 0x27, - 0x64, 0xe0, 0xcb, 0x1e, 0xc4, 0xfe, 0x84, 0x73, 0xa0, 0xda, 0xc7, 0xba, 0xe7, 0xe7, 0x79, 0x1d, - 0xf9, 0x3f, 0xee, 0x96, 0xde, 0x55, 0x9a, 0x9f, 0xb0, 0x87, 0x13, 0x25, 0xe7, 0x08, 0xb2, 0xeb, - 0x03, 0x29, 0xbb, 0xf2, 0xbf, 0x03, 0xc5, 0x8f, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 0x8e, 0xbd, 0x51, - 0x88, 0xed, 0xc5, 0x99, 0xf6, 0x17, 0x05, 0xcc, 0xc4, 0xd0, 0x23, 0x10, 0x38, 0x0f, 0x65, 0x81, - 0xf3, 0x5a, 0x91, 0x45, 0xe4, 0x28, 0x9c, 0x7f, 0x54, 0xa5, 0xe0, 0xbf, 0xf3, 0x12, 0xe7, 0x97, - 0x60, 0xae, 0x6f, 0xe9, 0x9e, 0x41, 0x36, 0x74, 0xac, 0x19, 0x01, 0x80, 0x75, 0xf1, 0x72, 0xf2, - 0xdd, 0x22, 0xa4, 0x27, 0x8e, 0xab, 0xb9, 0x94, 0x98, 0xf4, 0x51, 0xe4, 0x19, 0xe9, 0x90, 0x47, - 0x19, 0x74, 0x28, 0x73, 0x12, 0xf8, 0x36, 0x98, 0x64, 0x7a, 0x42, 0xeb, 0x90, 0x1d, 0x6c, 0x04, - 0xc2, 0x39, 0xfc, 0xe2, 0xb1, 0x1f, 0x99, 0x50, 0x1c, 0x07, 0x8f, 0xc1, 0xac, 0x6d, 0x75, 0xb7, - 0xb1, 0x89, 0x7b, 0x84, 0xb5, 0xbd, 0x3d, 0xfe, 0xaf, 0x08, 0xfc, 0x32, 0xa6, 0xde, 0x7e, 0x27, - 0x78, 0x4b, 0xdf, 0x4b, 0x43, 0xd8, 0x4b, 0x4b, 0xc6, 0x30, 0x7f, 0x69, 0xc9, 0xa2, 0x84, 0x4e, - 0xea, 0x2b, 0x9d, 0x7f, 0x67, 0xb9, 0x56, 0x24, 0xc3, 0x2e, 0xf8, 0x9d, 0x2e, 0xef, 0xae, 0xa9, - 0x76, 0xa1, 0x8f, 0x6c, 0x9f, 0x54, 0xc0, 0x95, 0xd4, 0xd1, 0xfd, 0x16, 0x6f, 0x7b, 0x52, 0x72, - 0xb1, 0x7c, 0x0e, 0xb9, 0xb8, 0x0e, 0x66, 0xc4, 0xf7, 0xbd, 0x84, 0xda, 0x0c, 0xf5, 0xf8, 0x86, - 0x6c, 0x46, 0x49, 0x7c, 0xd6, 0x6d, 0x53, 0xf5, 0x9c, 0xb7, 0x4d, 0xf1, 0x28, 0xc4, 0xff, 0x50, - 0xf8, 0xa9, 0x97, 0x8e, 0x42, 0xfc, 0x2b, 0x45, 0x12, 0xcf, 0x3a, 0x96, 0xcf, 0x1a, 0x32, 0x4c, - 0xc8, 0x1d, 0xeb, 0x50, 0xb2, 0xa2, 0x04, 0xfa, 
0x6b, 0x7d, 0xc3, 0xfa, 0xa7, 0x02, 0x5e, 0xc9, - 0xcd, 0x52, 0xb8, 0x2e, 0xbd, 0xf2, 0xdf, 0x4a, 0xbc, 0xf2, 0xff, 0x20, 0xd7, 0x31, 0xf6, 0xe2, - 0xef, 0x64, 0xdf, 0xe3, 0xbc, 0x57, 0xec, 0x1e, 0x27, 0x43, 0xe8, 0x9d, 0x7d, 0xa1, 0xd3, 0xbe, - 0xf5, 0xec, 0x79, 0x63, 0xec, 0xb3, 0xe7, 0x8d, 0xb1, 0x2f, 0x9e, 0x37, 0xc6, 0x7e, 0x35, 0x6c, - 0x28, 0xcf, 0x86, 0x0d, 0xe5, 0xb3, 0x61, 0x43, 0xf9, 0x62, 0xd8, 0x50, 0xfe, 0x35, 0x6c, 0x28, - 0xbf, 0xfb, 0xb2, 0x31, 0xf6, 0xd1, 0x84, 0x98, 0xf1, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe8, - 0x1d, 0x40, 0xdd, 0x77, 0x25, 0x00, 0x00, + // 2186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, + 0x19, 0xf7, 0xec, 0x43, 0x5a, 0x51, 0x96, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, + 0x1c, 0x39, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, + 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, + 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, + 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, + 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, + 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xf4, 0xb6, 0xab, 0x6a, + 0x56, 0xeb, 0xc8, 0x3b, 0x20, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, + 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x03, 0x42, 0xf1, 0x6a, 0xab, 0x47, + 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x0b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, + 0x32, 0xa0, 0x2a, 0x80, 0x8b, 0x2b, 0x3d, 0x8d, 0x1e, 0x7a, 0x07, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, + 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xc0, 0x7b, 0xc4, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x36, + 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x93, 0x63, 0x2d, 0x5e, 0x8d, 0x61, 0x6c, 0x4b, + 0xd7, 0x3a, 0x03, 0xe1, 0x56, 0x1a, 0xfa, 0x46, 0x04, 0x35, 0x70, 0xe7, 0x50, 0x33, 0x89, 0x33, + 0x68, 0xd9, 0x47, 0x3d, 0xd6, 0xe0, 0xb6, 0x0c, 0x42, 0x71, 0xd6, 0x00, 0xad, 0x3c, 0x2b, 0xc7, + 0x33, 0xa9, 0x66, 0x90, 0x94, 0xc1, 0x5b, 0xa3, 0x0c, 0xdc, 0xce, 0x21, 0x31, 0x70, 0xca, 0xee, + 0xf5, 0x3c, 0x3b, 0x8f, 0x6a, 0x7a, 0x4b, 0x33, 0xa9, 0x4b, 0x9d, 0xa4, 0x51, 0xf3, 0x3f, 0x0a, + 0x80, 0xeb, 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0x48, 0x5f, 0x73, 0x35, 0xcb, 0x84, 0x0f, + 0x41, 0x8d, 0xcd, 0xa7, 0x8b, 0x29, 0xae, 0x2b, 0x97, 0x95, 0xe5, 0xc9, 0xd5, 0x1b, 0x6a, 0xb4, + 0x29, 0x21, 0xbd, 0x6a, 0x1f, 0xf5, 0x58, 0x83, 0xab, 0x32, 0xb4, 0xda, 0xbf, 0xa9, 0xee, 0x1c, + 0x7c, 0x48, 0x3a, 0x74, 0x8b, 0x50, 0xdc, 0x86, 0x4f, 0x8e, 0x97, 0xce, 0x0d, 0x8f, 0x97, 0x40, + 0xd4, 0x86, 0x42, 0x56, 0xb8, 0x03, 0x2a, 0x9c, 0xbd, 0xc4, 0xd9, 0x57, 0x72, 0xd9, 0xc5, 0xa4, + 0x55, 0x84, 0x7f, 0xf2, 0xee, 0x63, 0x4a, 0x4c, 0xe6, 0x5e, 0xfb, 0xbc, 0xa0, 0xae, 0x6c, 0x60, + 0x8a, 0x11, 0x27, 0x82, 0xd7, 0x41, 0xcd, 0x11, 0xee, 0xd7, 0xcb, 0x97, 0x95, 0xe5, 0x72, 0xfb, + 0x82, 0x40, 0xd5, 0x82, 0x69, 0xa1, 0x10, 0xd1, 0x7c, 0xa2, 0x80, 0xf9, 0xf4, 0xbc, 0x37, 0x35, + 0x97, 0xc2, 0x1f, 0xa6, 0xe6, 0xae, 0x16, 0x9b, 0x3b, 0xb3, 0xe6, 0x33, 0x0f, 0x07, 0x0e, 0x5a, + 0x62, 0xf3, 0xde, 0x05, 0x55, 0x8d, 0x12, 0xc3, 0xad, 0x97, 0x2e, 0x97, 0x97, 0x27, 0x57, 0xaf, + 0xa9, 0x39, 0xb1, 0xae, 0xa6, 0xbd, 0x6b, 0x4f, 0x09, 0xde, 0xea, 0x3d, 0xc6, 0x80, 0x7c, 0xa2, + 0xe6, 0x2f, 0x4a, 0x60, 0x62, 0x03, 0x13, 0xc3, 0x32, 0xf7, 0x08, 0x3d, 0x83, 
0x9d, 0xbb, 0x0b, + 0x2a, 0xae, 0x4d, 0x3a, 0x62, 0xe7, 0xae, 0xe4, 0x4e, 0x20, 0xf4, 0x69, 0xcf, 0x26, 0x9d, 0x68, + 0xcb, 0xd8, 0x13, 0xe2, 0x0c, 0x70, 0x17, 0x8c, 0xb9, 0x14, 0x53, 0xcf, 0xe5, 0x1b, 0x36, 0xb9, + 0xba, 0x5c, 0x80, 0x8b, 0xe3, 0xdb, 0xd3, 0x82, 0x6d, 0xcc, 0x7f, 0x46, 0x82, 0xa7, 0xf9, 0x8f, + 0x12, 0x80, 0x21, 0x76, 0xdd, 0x32, 0xbb, 0x1a, 0x65, 0xe1, 0x7c, 0x0b, 0x54, 0xe8, 0xc0, 0x26, + 0x7c, 0x41, 0x26, 0xda, 0x57, 0x02, 0x57, 0xee, 0x0f, 0x6c, 0xf2, 0xf9, 0xf1, 0xd2, 0x7c, 0xda, + 0x82, 0xf5, 0x20, 0x6e, 0x03, 0x37, 0x43, 0x27, 0x4b, 0xdc, 0xfa, 0x0d, 0x79, 0xe8, 0xcf, 0x8f, + 0x97, 0x32, 0x8e, 0x19, 0x35, 0x64, 0x92, 0x1d, 0x84, 0x7d, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, + 0x36, 0x5d, 0x7f, 0x24, 0xcd, 0x20, 0x62, 0xfa, 0xaf, 0x15, 0xdb, 0x28, 0x66, 0xd1, 0x5e, 0x14, + 0x5e, 0xc0, 0xcd, 0x14, 0x1b, 0xca, 0x18, 0x01, 0x5e, 0x01, 0x63, 0x0e, 0xc1, 0xae, 0x65, 0xd6, + 0x2b, 0x7c, 0x16, 0xe1, 0x02, 0x22, 0xde, 0x8a, 0x44, 0x2f, 0xbc, 0x0a, 0xc6, 0x0d, 0xe2, 0xba, + 0xb8, 0x47, 0xea, 0x55, 0x0e, 0x9c, 0x11, 0xc0, 0xf1, 0x2d, 0xbf, 0x19, 0x05, 0xfd, 0xcd, 0xdf, + 0x2b, 0x60, 0x2a, 0x5c, 0xb9, 0x33, 0xc8, 0x9c, 0x3b, 0x72, 0xe6, 0x34, 0x47, 0x07, 0x4b, 0x4e, + 0xc2, 0x7c, 0x54, 0x8e, 0x39, 0xce, 0xc2, 0x11, 0xfe, 0x08, 0xd4, 0x5c, 0xa2, 0x93, 0x0e, 0xb5, + 0x1c, 0xe1, 0xf8, 0xeb, 0x05, 0x1d, 0xc7, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0xf3, 0xcc, 0xf3, + 0xe0, 0x09, 0x85, 0x94, 0xf0, 0x7d, 0x50, 0xa3, 0xc4, 0xb0, 0x75, 0x4c, 0x89, 0xc8, 0x9a, 0x97, + 0xe3, 0xce, 0xb3, 0x98, 0x61, 0x64, 0xbb, 0x56, 0xf7, 0xbe, 0x80, 0xf1, 0x94, 0x09, 0x17, 0x23, + 0x68, 0x45, 0x21, 0x0d, 0xb4, 0xc1, 0xb4, 0x67, 0x77, 0x19, 0x92, 0xb2, 0xe3, 0xbc, 0x37, 0x10, + 0x31, 0x74, 0x63, 0xf4, 0xaa, 0xec, 0x4b, 0x76, 0xed, 0x79, 0x31, 0xca, 0xb4, 0xdc, 0x8e, 0x12, + 0xfc, 0x70, 0x0d, 0xcc, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xf6, 0x48, 0xc7, 0x32, 0xbb, 0x2e, + 0x0f, 0xa5, 0x6a, 0x7b, 0x41, 0x10, 0xcc, 0x6c, 0xc9, 0xdd, 0x28, 0x89, 0x87, 0x9b, 0x60, 0x2e, + 0x38, 0x80, 0xef, 0x6a, 0x2e, 0xb5, 0x9c, 0xc1, 0xa6, 0x66, 0x68, 0xb4, 0x3e, 0xc6, 0x79, 0xea, + 0xc3, 0xe3, 0xa5, 0x39, 0x94, 0xd1, 0x8f, 0x32, 0xad, 0x9a, 0xbf, 0x19, 0x03, 0x33, 0x89, 0x73, + 0x01, 0x3e, 0x00, 0xf3, 0x1d, 0xcf, 0x71, 0x88, 0x49, 0xb7, 0x3d, 0xe3, 0x80, 0x38, 0x7b, 0x9d, + 0x43, 0xd2, 0xf5, 0x74, 0xd2, 0xe5, 0xdb, 0x5a, 0x6d, 0x37, 0x84, 0xaf, 0xf3, 0xeb, 0x99, 0x28, + 0x94, 0x63, 0x0d, 0xdf, 0x03, 0xd0, 0xe4, 0x4d, 0x5b, 0x9a, 0xeb, 0x86, 0x9c, 0x25, 0xce, 0x19, + 0xa6, 0xe2, 0x76, 0x0a, 0x81, 0x32, 0xac, 0x98, 0x8f, 0x5d, 0xe2, 0x6a, 0x0e, 0xe9, 0x26, 0x7d, + 0x2c, 0xcb, 0x3e, 0x6e, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x7c, 0x13, 0x4c, 0xfa, 0xa3, 0xf1, 0x35, + 0x17, 0x9b, 0x33, 0x2b, 0xc8, 0x26, 0xb7, 0xa3, 0x2e, 0x14, 0xc7, 0xb1, 0xa9, 0x59, 0x07, 0x2e, + 0x71, 0xfa, 0xa4, 0x7b, 0xc7, 0x17, 0x07, 0xac, 0x82, 0x56, 0x79, 0x05, 0x0d, 0xa7, 0xb6, 0x93, + 0x42, 0xa0, 0x0c, 0x2b, 0x36, 0x35, 0x3f, 0x6a, 0x52, 0x53, 0x1b, 0x93, 0xa7, 0xb6, 0x9f, 0x89, + 0x42, 0x39, 0xd6, 0x2c, 0xf6, 0x7c, 0x97, 0xd7, 0xfa, 0x58, 0xd3, 0xf1, 0x81, 0x4e, 0xea, 0xe3, + 0x72, 0xec, 0x6d, 0xcb, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x45, 0xbf, 0x69, 0xdf, 0xc4, 0x21, + 0x49, 0x8d, 0x93, 0xbc, 0x24, 0x48, 0x2e, 0x6e, 0x27, 0x01, 0x28, 0x6d, 0x03, 0x6f, 0x81, 0xe9, + 0x8e, 0xa5, 0xeb, 0x3c, 0x1e, 0xd7, 0x2d, 0xcf, 0xa4, 0xf5, 0x09, 0xce, 0x02, 0x59, 0x0e, 0xad, + 0x4b, 0x3d, 0x28, 0x81, 0x84, 0x3f, 0x06, 0xa0, 0x13, 0x14, 0x06, 0xb7, 0x0e, 0x46, 0x28, 0x80, + 0x74, 0x59, 0x8a, 0x2a, 0x73, 0xd8, 0xe4, 0xa2, 0x18, 0x65, 0xf3, 0x23, 0x05, 0x2c, 0xe4, 0x24, + 0x3a, 
0xfc, 0xae, 0x54, 0x04, 0xaf, 0x25, 0x8a, 0xe0, 0xa5, 0x1c, 0xb3, 0x58, 0x25, 0x3c, 0x04, + 0x53, 0x4c, 0x90, 0x68, 0x66, 0xcf, 0x87, 0x88, 0xb3, 0xac, 0x95, 0x3b, 0x01, 0x14, 0x47, 0x47, + 0xa7, 0xf2, 0xc5, 0xe1, 0xf1, 0xd2, 0x94, 0xd4, 0x87, 0x64, 0xe2, 0xe6, 0x2f, 0x4b, 0x00, 0x6c, + 0x10, 0x5b, 0xb7, 0x06, 0x06, 0x31, 0xcf, 0x42, 0xd3, 0xdc, 0x93, 0x34, 0xcd, 0xab, 0xf9, 0x5b, + 0x12, 0x3a, 0x95, 0x2b, 0x6a, 0xde, 0x4f, 0x88, 0x9a, 0xab, 0x45, 0xc8, 0x9e, 0xad, 0x6a, 0x3e, + 0x29, 0x83, 0xd9, 0x08, 0x1c, 0xc9, 0x9a, 0xdb, 0xd2, 0x8e, 0xbe, 0x9a, 0xd8, 0xd1, 0x85, 0x0c, + 0x93, 0x17, 0xa6, 0x6b, 0x9e, 0xbf, 0xbe, 0x80, 0x1f, 0x82, 0x69, 0x26, 0x64, 0xfc, 0x90, 0xe0, + 0x32, 0x69, 0xec, 0xc4, 0x32, 0x29, 0x2c, 0x6e, 0x9b, 0x12, 0x13, 0x4a, 0x30, 0xe7, 0xc8, 0xb2, + 0xf1, 0x17, 0x2d, 0xcb, 0x9a, 0x7f, 0x50, 0xc0, 0x74, 0xb4, 0x4d, 0x67, 0x20, 0xa2, 0xee, 0xca, + 0x22, 0xea, 0xe5, 0x02, 0xc1, 0x99, 0xa3, 0xa2, 0x3e, 0xa9, 0xc4, 0x5d, 0xe7, 0x32, 0x6a, 0x99, + 0xbd, 0x82, 0xd9, 0xba, 0xd6, 0xc1, 0xae, 0xa8, 0xb7, 0xe7, 0xfd, 0xd7, 0x2f, 0xbf, 0x0d, 0x85, + 0xbd, 0x92, 0xe0, 0x2a, 0xbd, 0x58, 0xc1, 0x55, 0x7e, 0x3e, 0x82, 0xeb, 0x07, 0xa0, 0xe6, 0x06, + 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x15, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, 0xea, 0x50, 0x5f, 0x85, 0x74, + 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, 0x5c, 0xd2, 0xe5, 0x19, 0x50, + 0x8b, 0x92, 0x79, 0x97, 0xb7, 0x22, 0xd1, 0x0b, 0xf7, 0xc1, 0x82, 0xed, 0x58, 0x3d, 0x87, 0xb8, + 0xee, 0x06, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, 0x5e, 0x1a, 0x1e, 0x2f, 0x2d, + 0xec, 0x66, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, 0x79, 0x36, 0xe6, 0xc8, 0x14, + 0xe5, 0x54, 0x32, 0xe5, 0x7a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, 0x2a, 0x48, 0xc5, 0xea, 0x1a, + 0x98, 0x11, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x7d, 0xb9, 0x1b, 0x25, 0xf1, 0x4c, + 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x2d, 0x09, 0x40, 0x69, 0x1b, 0xb8, 0x05, + 0x66, 0x3d, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0xbb, 0x9f, 0x86, 0xa0, 0x2c, 0x3b, + 0xf8, 0x50, 0xd2, 0x23, 0x63, 0xfc, 0x48, 0xb8, 0x5e, 0x20, 0xac, 0x0b, 0x0b, 0x12, 0x78, 0x1b, + 0x4c, 0x39, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, 0x6c, 0x0a, 0xc5, 0x3b, 0x91, + 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, 0xc0, 0x74, 0x1e, 0x8e, 0xbc, + 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, 0x5a, + 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9b, 0x4b, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, 0xa0, + 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x36, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, 0x5f, + 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, 0x27, 0x51, 0x12, 0x79, 0x95, + 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, 0xfc, 0x4e, 0xa6, 0xf9, 0x97, + 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, 0x2e, 0x98, 0x7b, 0xe4, 0xe9, + 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, 0x72, 0xee, 0xfb, 0x19, 0x18, + 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0xa7, 0x2a, 0xf6, 0xa9, 0x0a, 0x54, 0x39, 0x41, 0x05, 0xca, + 0x2c, 0xdc, 0xd5, 0x53, 0x14, 0xee, 0x93, 0x55, 0xda, 0x8c, 0x83, 0x6b, 0xe4, 0xab, 0xff, 0xcf, + 0x15, 0x30, 0x9f, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x6d, 0xe0, 0xc7, 0xf1, 0x8b, 0x8f, 0x51, 0x45, + 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0x7b, 0x26, 0xdd, 0x71, 0xf6, 0xa8, 0xa3, 0x99, + 0x3d, 0xbf, 0xf2, 0x6e, 0x49, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, 0x01, 0x0b, 0x39, 0x95, 0xef, + 0x6c, 0x3d, 0x81, 0x1f, 0x80, 
0x9a, 0x81, 0x1f, 0xef, 0x79, 0x4e, 0x2f, 0xab, 0x56, 0x17, 0x1b, + 0x87, 0x67, 0xf3, 0x96, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x07, 0x5c, 0x96, 0x26, 0xc9, 0x32, 0x87, + 0x3c, 0xf2, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x13, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, + 0x6a, 0x7b, 0x6a, 0x78, 0xbc, 0x34, 0xb1, 0x1b, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x5f, 0x05, 0x54, + 0xf7, 0x3a, 0x58, 0x27, 0x67, 0x50, 0xed, 0x37, 0xa4, 0x6a, 0x9f, 0x7f, 0x93, 0xce, 0xfd, 0xc9, + 0x2d, 0xf4, 0x9b, 0x89, 0x42, 0xff, 0xca, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0x77, 0xc0, 0x44, 0x38, + 0xdc, 0xc9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x32, 0x36, 0xc4, 0x09, 0x8f, 0xaf, 0x87, 0xd2, + 0xb1, 0xcf, 0x12, 0x73, 0xb5, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, 0xd7, 0xa4, 0x4e, 0xfc, 0x05, + 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0x4c, 0x53, 0xec, 0xf4, 0x08, 0x0d, 0xfa, 0xf8, 0x82, 0x4d, 0x44, + 0xb7, 0x13, 0xf7, 0xa5, 0x5e, 0x94, 0x40, 0x2f, 0xde, 0x06, 0x53, 0xd2, 0x60, 0xf0, 0x02, 0x28, + 0x1f, 0x91, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x40, 0xb5, 0x8f, 0x75, 0xcf, 0x8f, 0xf3, + 0x09, 0xe4, 0x3f, 0xdc, 0x2a, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, 0x27, 0x0a, 0xce, 0x33, 0x88, + 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, 0x18, 0x43, 0x89, 0x18, 0x7b, + 0xad, 0x10, 0xdb, 0xb3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x17, 0x43, 0x47, 0x72, 0xf2, 0xdb, 0x92, + 0x9c, 0x5c, 0x4e, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, 0xb4, 0x9e, 0xfc, 0xa3, 0x02, + 0x66, 0x62, 0x6b, 0x77, 0x06, 0x82, 0xf2, 0x9e, 0x2c, 0x28, 0x5f, 0x29, 0x12, 0x34, 0x39, 0x8a, + 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, 0xeb, 0x5b, 0xba, 0x67, 0x90, + 0x75, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, 0x0b, 0xe9, 0x89, 0xe3, 0x6a, + 0x2e, 0x25, 0x26, 0x7d, 0x10, 0x59, 0x46, 0xba, 0xef, 0x41, 0x06, 0x1d, 0xca, 0x1c, 0x04, 0xbe, + 0x09, 0x26, 0x99, 0x7e, 0xd3, 0x3a, 0x64, 0x1b, 0x1b, 0x41, 0x60, 0x85, 0x9f, 0xb0, 0xf6, 0xa2, + 0x2e, 0x14, 0xc7, 0xc1, 0x43, 0x30, 0x6b, 0x5b, 0xdd, 0x2d, 0x6c, 0xe2, 0x1e, 0x61, 0x32, 0x63, + 0x97, 0xff, 0x8f, 0x87, 0x5f, 0x7e, 0x4d, 0xb4, 0xdf, 0x0a, 0x6e, 0x45, 0x76, 0xd3, 0x10, 0xf6, + 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x6a, + 0x91, 0x08, 0x3b, 0xe5, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, 0xa9, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, + 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, 0xcb, 0x27, 0x90, 0xe7, 0x6b, 0x60, + 0x46, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x5d, 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, + 0x5e, 0xf5, 0x84, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, + 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0xe3, 0xb2, 0x42, 0xd8, 0x97, 0x7a, 0x51, + 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0x2b, 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, + 0xdf, 0x14, 0xf0, 0x52, 0x6e, 0x22, 0xc0, 0x35, 0xa9, 0xec, 0xae, 0x24, 0xca, 0xee, 0xb7, 0x72, + 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, + 0x47, 0xd7, 0x5e, 0x79, 0xf2, 0xb4, 0x71, 0xee, 0xe3, 0xa7, 0x8d, 0x73, 0x9f, 0x3e, 0x6d, 0x9c, + 0xfb, 0xd9, 0xb0, 0xa1, 0x3c, 0x19, 0x36, 0x94, 0x8f, 0x87, 0x0d, 0xe5, 0xd3, 0x61, 0x43, 0xf9, + 0xfb, 0xb0, 0xa1, 0xfc, 0xfa, 0xb3, 0xc6, 0xb9, 0x0f, 0xc6, 0xc5, 0x88, 0xff, 0x0b, 0x00, 0x00, + 0xff, 0xff, 0xe4, 0x8f, 0x6a, 0x57, 0x17, 0x29, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index dcfd9c46..dac5a5f6 100644 --- 
a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -84,6 +84,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -99,9 +120,8 @@ message DaemonSetList { message DaemonSetSpec { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. @@ -175,6 +195,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. @@ -247,7 +273,7 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - // +optional + // It must match the pod template's labels. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. @@ -407,10 +433,9 @@ message ReplicaSetSpec { optional int32 minReadySeconds = 4; // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if @@ -569,6 +594,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. 
+ // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -588,9 +634,8 @@ message StatefulSetSpec { optional int32 replicas = 1; // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if @@ -673,6 +718,12 @@ message StatefulSetStatus { // newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -688,3 +739,4 @@ message StatefulSetUpdateStrategy { // +optional optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; } + diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go index e207c7dc..2784ee37 100644 --- a/vendor/k8s.io/api/apps/v1beta2/register.go +++ b/vendor/k8s.io/api/apps/v1beta2/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index fa5676e1..fb54f95c 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -28,6 +28,7 @@ const ( StatefulSetRevisionLabel = ControllerRevisionHashLabelKey DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -169,10 +170,9 @@ type StatefulSetSpec struct { Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -254,6 +254,31 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -294,8 +319,8 @@ type DeploymentSpec struct { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` @@ -525,10 +550,9 @@ type RollingUpdateDaemonSet struct { type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -601,6 +625,33 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. 
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient @@ -707,11 +758,10 @@ type ReplicaSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 35ae28ca..43864f4c 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -59,6 +59,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -71,7 +84,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "An object that describes the pod that will be created. 
The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", @@ -93,6 +106,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -147,7 +161,7 @@ func (DeploymentList) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", @@ -224,7 +238,7 @@ var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "Selector is a label query over pods that should match the replica count. 
Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } @@ -315,6 +329,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -326,7 +353,7 @@ func (StatefulSetList) SwaggerDoc() map[string]string { var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. 
Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", @@ -349,6 +376,7 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 124f84a9..d25c869b 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -23,141 +23,10 @@ package v1beta2 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
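
Aside (editor's illustration, not part of the vendored patch): the swagger hunks above document the new `conditions` list on DaemonSetStatus and StatefulSetStatus, each entry carrying type, status, lastTransitionTime, reason, and message. A minimal sketch of populating one such condition, using only the fields listed in the generated docs; the condition type string "Example" and the reason/message values are made up for illustration.

```
// Illustrative sketch only; not part of the vendored patch.
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	var status appsv1beta2.StatefulSetStatus
	status.Conditions = append(status.Conditions, appsv1beta2.StatefulSetCondition{
		Type:               "Example", // hypothetical condition type
		Status:             corev1.ConditionTrue,
		LastTransitionTime: metav1.Now(),
		Reason:             "ExampleReason",
		Message:            "a human readable message about the transition",
	})
	fmt.Println(len(status.Conditions), status.Conditions[0].Type)
}
```

The same shape applies to DaemonSetCondition; both types carry exactly the fields documented in the swagger maps above.
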
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: 
reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
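
Aside (editor's illustration, not part of the vendored patch): the block removed above is the reflection-based deep-copy registration that the `+k8s:deepcopy-gen=package` change makes unnecessary; callers now rely on the statically generated DeepCopy/DeepCopyInto methods kept in this file. A minimal usage sketch, assuming the vendored apps/v1beta2 package:

```
// Illustrative sketch only; not part of the vendored patch.
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	replicas := int32(3)
	original := &appsv1beta2.StatefulSet{}
	original.Spec.Replicas = &replicas

	// Generated method: copies the pointed-to value, not the pointer itself.
	clone := original.DeepCopy()
	*clone.Spec.Replicas = 5

	fmt.Println(*original.Spec.Replicas, *clone.Spec.Replicas) // 3 5
}
```
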
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -249,6 +118,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -331,6 +217,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -880,6 +773,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -978,6 +888,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 15b117a4..2d2ed2ee 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 96f1e776..fb7888b6 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -96,3 +96,4 @@ message UserInfo { // +optional map extra = 4; } + diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go index 936237c2..2ca79a62 100644 --- a/vendor/k8s.io/api/authentication/v1/register.go +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index c1717c1c..243de759 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index 065cfbd9..e0de315d 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index 1d8431bf..300e5348 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -96,3 +96,4 @@ message UserInfo { // +optional map extra = 4; } + diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index 7d89e5ef..ed23e50f 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 1b94ecec..aa8d2ef3 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index b02e09ca..c06b798d 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index 0795a645..d34d105f 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -730,6 +730,14 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1015,6 +1023,7 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1205,6 +1214,7 @@ func (this *SubjectAccessReviewStatus) String() string { `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, `}`, }, "") return s @@ -3130,6 +3140,26 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } m.EvaluationError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3422,77 +3452,77 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto + // 1152 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, - 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0xab, 0x11, 0x8a, 0xd4, 0x96, 0x6a, + 0x14, 0xf7, 0xae, 0xed, 0xd4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, + 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0x2b, 0x11, 0x8a, 0xd4, 0x96, 0x6a, 0xa2, 0x44, 0xa2, 0x08, 0xc4, 0x78, 0x3d, 0xb1, 0x97, 0xd8, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x38, - 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0x80, 0x90, 0x90, 0xb8, 0x71, 0xe0, 0x80, 0x72, - 0xec, 0xb1, 0x07, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, 0x9b, - 0x70, 0xa0, 0x97, 0xde, 0x76, 0xdf, 0xef, 0xf7, 0xfe, 0xcc, 0x7b, 0x6f, 0xde, 0x3c, 0xf0, 0xe0, - 0x70, 0x93, 0x59, 0xae, 0x6f, 0x1f, 0x86, 0x4d, 0x42, 0x3d, 0xc2, 0x09, 0xb3, 0xfb, 0xc4, 0x6b, - 0xf9, 0xd4, 0x56, 0x00, 0x0e, 0x5c, 0x1b, 0x87, 0xbc, 0xe3, 0x53, 0xf7, 0x1b, 0xcc, 0x5d, 0xdf, - 0xb3, 0xfb, 0xeb, 0x76, 0x9b, 0x78, 0x84, 0x62, 0x4e, 0x5a, 0x56, 0x40, 0x7d, 0xee, 0xc3, 0x1b, - 0x31, 0xd9, 0xc2, 0x81, 0x6b, 0x8d, 0x91, 0xad, 0xfe, 0xfa, 0xca, 0x7b, 0x6d, 0x97, 0x77, 0xc2, - 0xa6, 0xe5, 0xf8, 0x3d, 0xbb, 0xed, 0xb7, 0x7d, 0x5b, 0xea, 0x34, 0xc3, 0x03, 0xf9, 0x27, 0x7f, - 0xe4, 0x57, 0x6c, 0x6b, 0xe5, 0x76, 0xe2, 0xb8, 0x87, 0x9d, 0x8e, 0xeb, 0x11, 0x7a, 0x6c, 0x07, - 0x87, 0x6d, 0x21, 0x60, 0x76, 0x8f, 0x70, 0x9c, 0x11, 0xc1, 0x8a, 
0x3d, 0x49, 0x8b, 0x86, 0x1e, - 0x77, 0x7b, 0xe4, 0x9c, 0xc2, 0xdd, 0x97, 0x29, 0x30, 0xa7, 0x43, 0x7a, 0xf8, 0x9c, 0xde, 0x07, - 0x93, 0xf4, 0x42, 0xee, 0x76, 0x6d, 0xd7, 0xe3, 0x8c, 0xd3, 0xb3, 0x4a, 0xe6, 0x06, 0x00, 0xdb, - 0x5f, 0x73, 0x8a, 0xf7, 0x71, 0x37, 0x24, 0xb0, 0x06, 0x8a, 0x2e, 0x27, 0x3d, 0x66, 0x68, 0xab, - 0xf9, 0xb5, 0x72, 0xa3, 0x1c, 0x0d, 0x6a, 0xc5, 0x1d, 0x21, 0x40, 0xb1, 0xfc, 0x5e, 0xe9, 0xfb, - 0x1f, 0x6b, 0xb9, 0x67, 0x7f, 0xae, 0xe6, 0xcc, 0x5f, 0x74, 0x60, 0x3c, 0xf4, 0x1d, 0xdc, 0xdd, - 0x0d, 0x9b, 0x5f, 0x12, 0x87, 0x6f, 0x39, 0x0e, 0x61, 0x0c, 0x91, 0xbe, 0x4b, 0x8e, 0xe0, 0x17, - 0xa0, 0x24, 0xd2, 0xd1, 0xc2, 0x1c, 0x1b, 0xda, 0xaa, 0xb6, 0x56, 0xa9, 0xbf, 0x6f, 0x25, 0x85, - 0x18, 0x45, 0x67, 0x05, 0x87, 0x6d, 0x21, 0x60, 0x96, 0x60, 0x5b, 0xfd, 0x75, 0xeb, 0x63, 0x69, - 0xeb, 0x11, 0xe1, 0xb8, 0x01, 0x4f, 0x06, 0xb5, 0x5c, 0x34, 0xa8, 0x81, 0x44, 0x86, 0x46, 0x56, - 0xe1, 0x3e, 0x28, 0xb0, 0x80, 0x38, 0x86, 0x2e, 0xad, 0xdf, 0xb6, 0xa6, 0x94, 0xd9, 0xca, 0x88, - 0x70, 0x37, 0x20, 0x4e, 0xe3, 0x8a, 0xf2, 0x50, 0x10, 0x7f, 0x48, 0xda, 0x83, 0x9f, 0x83, 0x19, - 0xc6, 0x31, 0x0f, 0x99, 0x91, 0x97, 0x96, 0xef, 0x5e, 0xda, 0xb2, 0xd4, 0x6e, 0xbc, 0xa1, 0x6c, - 0xcf, 0xc4, 0xff, 0x48, 0x59, 0x35, 0x3f, 0x05, 0x4b, 0x8f, 0x7d, 0x0f, 0x11, 0xe6, 0x87, 0xd4, - 0x21, 0x5b, 0x9c, 0x53, 0xb7, 0x19, 0x72, 0xc2, 0xe0, 0x2a, 0x28, 0x04, 0x98, 0x77, 0x64, 0xba, - 0xca, 0x49, 0x68, 0x4f, 0x30, 0xef, 0x20, 0x89, 0x08, 0x46, 0x9f, 0xd0, 0xa6, 0x3c, 0x72, 0x8a, - 0xb1, 0x4f, 0x68, 0x13, 0x49, 0xc4, 0xfc, 0x0a, 0xcc, 0xa7, 0x8c, 0xa3, 0xb0, 0x2b, 0x2b, 0x2a, - 0xa0, 0xb1, 0x8a, 0x0a, 0x0d, 0x86, 0x62, 0x39, 0xbc, 0x0f, 0xe6, 0xbd, 0x44, 0x67, 0x0f, 0x3d, - 0x64, 0x86, 0x2e, 0xa9, 0x8b, 0xd1, 0xa0, 0x96, 0x36, 0x27, 0x20, 0x74, 0x96, 0x6b, 0xfe, 0xa6, - 0x03, 0x98, 0x71, 0x1a, 0x1b, 0x94, 0x3d, 0xdc, 0x23, 0x2c, 0xc0, 0x0e, 0x51, 0x47, 0xba, 0xaa, - 0x02, 0x2e, 0x3f, 0x1e, 0x02, 0x28, 0xe1, 0xbc, 0xfc, 0x70, 0xf0, 0x2d, 0x50, 0x6c, 0x53, 0x3f, - 0x0c, 0x64, 0x61, 0xca, 0x8d, 0x39, 0x45, 0x29, 0x7e, 0x24, 0x84, 0x28, 0xc6, 0xe0, 0x4d, 0x30, - 0xdb, 0x27, 0x94, 0xb9, 0xbe, 0x67, 0x14, 0x24, 0x6d, 0x5e, 0xd1, 0x66, 0xf7, 0x63, 0x31, 0x1a, - 0xe2, 0xf0, 0x16, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x05, 0xc5, 0x2d, 0x8d, 0x32, 0x38, - 0x62, 0xc0, 0x3b, 0xa0, 0xc2, 0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, 0xca, 0x6e, - 0x02, 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0x98, 0x1d, 0x3f, 0x96, 0x48, 0x01, 0x92, 0x88, - 0xf9, 0xbb, 0x06, 0xae, 0x5c, 0xae, 0x62, 0xef, 0x82, 0x32, 0x0e, 0x5c, 0x79, 0xec, 0x61, 0xad, - 0xe6, 0x44, 0x5e, 0xb7, 0x9e, 0xec, 0xc4, 0x42, 0x94, 0xe0, 0x82, 0x3c, 0x0c, 0x46, 0xb4, 0xf4, - 0x88, 0x3c, 0x74, 0xc9, 0x50, 0x82, 0xc3, 0x0d, 0x30, 0x37, 0xfc, 0x91, 0x45, 0x32, 0x0a, 0x52, - 0xe1, 0x6a, 0x34, 0xa8, 0xcd, 0xa1, 0x34, 0x80, 0xc6, 0x79, 0xe6, 0xaf, 0x3a, 0x58, 0xde, 0x25, - 0xdd, 0x83, 0x57, 0x33, 0x0b, 0x9e, 0x8e, 0xcd, 0x82, 0xcd, 0xe9, 0x37, 0x36, 0x3b, 0xca, 0x57, - 0x36, 0x0f, 0x7e, 0xd0, 0xc1, 0x8d, 0x29, 0x31, 0xc1, 0x23, 0x00, 0xe9, 0xb9, 0xeb, 0xa5, 0xf2, - 0x68, 0x4f, 0x8d, 0xe5, 0xfc, 0xad, 0x6c, 0x5c, 0x8b, 0x06, 0xb5, 0x8c, 0xdb, 0x8a, 0x32, 0x5c, - 0xc0, 0x6f, 0x35, 0xb0, 0xe4, 0x65, 0x4d, 0x2a, 0x95, 0xe6, 0xfa, 0x54, 0xe7, 0x99, 0x33, 0xae, - 0x71, 0x3d, 0x1a, 0xd4, 0xb2, 0xc7, 0x1f, 0xca, 0xf6, 0x25, 0x5e, 0x99, 0x6b, 0xa9, 0xf4, 0x88, - 0x0b, 0xf2, 0xff, 0xf5, 0xd5, 0x27, 0x63, 0x7d, 0xb5, 0x71, 0xd1, 0xbe, 0x4a, 0x05, 0x39, 0xb1, - 0xad, 0x3e, 0x3b, 0xd3, 0x56, 0x77, 0x2e, 0xd2, 0x56, 0x69, 0xc3, 0xd3, 0xbb, 0xea, 0x11, 
0x58, - 0x99, 0x1c, 0xd0, 0xa5, 0x87, 0xb3, 0xf9, 0x93, 0x0e, 0x16, 0x5f, 0x3f, 0xf3, 0x97, 0xb9, 0xd6, - 0x7f, 0x14, 0xc0, 0xf2, 0xeb, 0x2b, 0x3d, 0x69, 0xd1, 0x09, 0x19, 0xa1, 0xea, 0x19, 0x1f, 0x15, - 0x67, 0x8f, 0x11, 0x8a, 0x24, 0x02, 0x4d, 0x30, 0xd3, 0x8e, 0x5f, 0xb7, 0xf8, 0xfd, 0x01, 0x22, - 0xc1, 0xea, 0x69, 0x53, 0x08, 0x6c, 0x81, 0x22, 0x11, 0x7b, 0xab, 0x51, 0x5c, 0xcd, 0xaf, 0x55, - 0xea, 0x1f, 0xfe, 0x97, 0xce, 0xb0, 0xe4, 0xe6, 0xbb, 0xed, 0x71, 0x7a, 0x9c, 0xac, 0x13, 0x52, - 0x86, 0x62, 0xe3, 0xf0, 0x4d, 0x90, 0x0f, 0xdd, 0x96, 0x7a, 0xed, 0x2b, 0x8a, 0x92, 0xdf, 0xdb, - 0x79, 0x80, 0x84, 0x7c, 0x05, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0x0b, 0x20, 0x7f, 0x48, 0x8e, 0xe3, - 0x0b, 0x85, 0xc4, 0x27, 0xbc, 0x0f, 0x8a, 0x7d, 0xb1, 0x57, 0xab, 0xfc, 0xbe, 0x33, 0x35, 0xc8, - 0x64, 0x0d, 0x47, 0xb1, 0xd6, 0x3d, 0x7d, 0x53, 0x33, 0x7f, 0xd6, 0xc0, 0xf5, 0x89, 0xed, 0x27, - 0xd6, 0x1d, 0xdc, 0xed, 0xfa, 0x47, 0xa4, 0x25, 0xdd, 0x96, 0x92, 0x75, 0x67, 0x2b, 0x16, 0xa3, - 0x21, 0x0e, 0xdf, 0x06, 0x33, 0x94, 0x60, 0xe6, 0x7b, 0x6a, 0xc5, 0x1a, 0x75, 0x2e, 0x92, 0x52, - 0xa4, 0x50, 0xb8, 0x05, 0xe6, 0x89, 0x70, 0x2f, 0xe3, 0xda, 0xa6, 0xd4, 0x1f, 0x56, 0x6a, 0x59, - 0x29, 0xcc, 0x6f, 0x8f, 0xc3, 0xe8, 0x2c, 0xdf, 0xfc, 0x47, 0x07, 0xc6, 0xa4, 0x91, 0x05, 0x0f, - 0x92, 0x1d, 0x43, 0x82, 0x72, 0xcd, 0xa9, 0xd4, 0x6f, 0x5e, 0xa8, 0xf1, 0x85, 0x46, 0x63, 0x49, - 0x05, 0x32, 0x97, 0x96, 0xa6, 0x56, 0x12, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, 0x77, 0xe1, 0x78, - 0x59, 0xaa, 0xd4, 0x6f, 0x5d, 0xb4, 0xcd, 0xa5, 0x37, 0x43, 0x79, 0x5b, 0x38, 0x03, 0x30, 0x74, - 0xce, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, 0xb6, 0x52, 0x32, - 0xdf, 0x76, 0x46, 0x08, 0x4a, 0xb1, 0xb2, 0xf2, 0x5d, 0xb8, 0x5c, 0xbe, 0x1b, 0x6b, 0x27, 0xa7, - 0xd5, 0xdc, 0xf3, 0xd3, 0x6a, 0xee, 0xc5, 0x69, 0x35, 0xf7, 0x2c, 0xaa, 0x6a, 0x27, 0x51, 0x55, - 0x7b, 0x1e, 0x55, 0xb5, 0x17, 0x51, 0x55, 0xfb, 0x2b, 0xaa, 0x6a, 0xdf, 0xfd, 0x5d, 0xcd, 0x3d, - 0xd5, 0xfb, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x3f, 0xd9, 0x21, 0x54, 0x0f, 0x00, - 0x00, + 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0xc0, 0x05, 0x89, 0x1b, 0x07, 0x0e, 0x28, 0xc7, + 0x1e, 0x8b, 0x84, 0x2c, 0xb2, 0x9c, 0xf9, 0x0e, 0x68, 0x66, 0xc7, 0xde, 0x75, 0xb2, 0x76, 0x13, + 0x0e, 0xf4, 0xd2, 0xdb, 0xee, 0xfb, 0xfd, 0xde, 0x9f, 0x79, 0x7f, 0x66, 0x1e, 0xd8, 0x3a, 0xdc, + 0x60, 0x96, 0xeb, 0xdb, 0x87, 0x61, 0x93, 0x50, 0x8f, 0x70, 0xc2, 0xec, 0x3e, 0xf1, 0x5a, 0x3e, + 0xb5, 0x15, 0x80, 0x03, 0xd7, 0xc6, 0x21, 0xef, 0xf8, 0xd4, 0xfd, 0x06, 0x73, 0xd7, 0xf7, 0xec, + 0xfe, 0x9a, 0xdd, 0x26, 0x1e, 0xa1, 0x98, 0x93, 0x96, 0x15, 0x50, 0x9f, 0xfb, 0xf0, 0x66, 0x4c, + 0xb6, 0x70, 0xe0, 0x5a, 0x63, 0x64, 0xab, 0xbf, 0xb6, 0xfc, 0x5e, 0xdb, 0xe5, 0x9d, 0xb0, 0x69, + 0x39, 0x7e, 0xcf, 0x6e, 0xfb, 0x6d, 0xdf, 0x96, 0x3a, 0xcd, 0xf0, 0x40, 0xfe, 0xc9, 0x1f, 0xf9, + 0x15, 0xdb, 0x5a, 0xbe, 0x93, 0x38, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x1e, 0xdb, 0xc1, 0x61, + 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x44, 0xb0, 0x6c, 0x4f, 0xd2, 0xa2, 0xa1, 0xc7, 0xdd, + 0x1e, 0x39, 0xa7, 0x70, 0xef, 0x65, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xa7, 0xf7, 0xc1, 0x24, + 0xbd, 0x90, 0xbb, 0x5d, 0xdb, 0xf5, 0x38, 0xe3, 0xf4, 0xac, 0x92, 0xb9, 0x0e, 0xc0, 0xf6, 0xd7, + 0x9c, 0xe2, 0x7d, 0xdc, 0x0d, 0x09, 0xac, 0x81, 0xa2, 0xcb, 0x49, 0x8f, 0x19, 0xda, 0x4a, 0x7e, + 0xb5, 0xdc, 0x28, 0x47, 0x83, 0x5a, 0x71, 0x47, 0x08, 0x50, 0x2c, 0xbf, 0x5f, 0xfa, 0xfe, 0xc7, + 0x5a, 0xee, 0xd9, 0x9f, 0x2b, 0x39, 0xf3, 0x67, 0x1d, 0x18, 0x0f, 0x7d, 0x07, 0x77, 0x77, 0xc3, + 0xe6, 
0x97, 0xc4, 0xe1, 0x9b, 0x8e, 0x43, 0x18, 0x43, 0xa4, 0xef, 0x92, 0x23, 0xf8, 0x05, 0x28, + 0x89, 0x74, 0xb4, 0x30, 0xc7, 0x86, 0xb6, 0xa2, 0xad, 0x56, 0xea, 0xef, 0x5b, 0x49, 0x21, 0x46, + 0xd1, 0x59, 0xc1, 0x61, 0x5b, 0x08, 0x98, 0x25, 0xd8, 0x56, 0x7f, 0xcd, 0xfa, 0x58, 0xda, 0x7a, + 0x44, 0x38, 0x6e, 0xc0, 0x93, 0x41, 0x2d, 0x17, 0x0d, 0x6a, 0x20, 0x91, 0xa1, 0x91, 0x55, 0xb8, + 0x0f, 0x0a, 0x2c, 0x20, 0x8e, 0xa1, 0x4b, 0xeb, 0x77, 0xac, 0x29, 0x65, 0xb6, 0x32, 0x22, 0xdc, + 0x0d, 0x88, 0xd3, 0xb8, 0xaa, 0x3c, 0x14, 0xc4, 0x1f, 0x92, 0xf6, 0xe0, 0xe7, 0x60, 0x86, 0x71, + 0xcc, 0x43, 0x66, 0xe4, 0xa5, 0xe5, 0x7b, 0x97, 0xb6, 0x2c, 0xb5, 0x1b, 0x6f, 0x28, 0xdb, 0x33, + 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc1, 0xe2, 0x63, 0xdf, 0x43, 0x84, 0xf9, 0x21, 0x75, 0xc8, + 0x26, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x02, 0x0a, 0x01, 0xe6, 0x1d, 0x99, 0xae, 0x72, + 0x12, 0xda, 0x13, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, 0x29, 0x8f, 0x9c, 0x62, 0xec, + 0x13, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x02, 0x73, 0x29, 0xe3, 0x28, 0xec, 0xca, 0x8a, 0x0a, 0x68, + 0xac, 0xa2, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x80, 0x39, 0x2f, 0xd1, 0xd9, 0x43, 0x0f, 0x99, + 0xa1, 0x4b, 0xea, 0x42, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, 0xe5, 0x9a, 0xbf, 0xea, 0x00, + 0x66, 0x9c, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, 0xd4, 0x91, 0xae, 0xa9, 0x80, + 0xcb, 0x8f, 0x87, 0x00, 0x4a, 0x38, 0x2f, 0x3f, 0x1c, 0x7c, 0x0b, 0x14, 0xdb, 0xd4, 0x0f, 0x03, + 0x59, 0x98, 0x72, 0x63, 0x56, 0x51, 0x8a, 0x1f, 0x09, 0x21, 0x8a, 0x31, 0x78, 0x0b, 0x5c, 0xe9, + 0x13, 0xca, 0x5c, 0xdf, 0x33, 0x0a, 0x92, 0x36, 0xa7, 0x68, 0x57, 0xf6, 0x63, 0x31, 0x1a, 0xe2, + 0xf0, 0x36, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x79, 0xc5, 0x2d, 0x8d, 0x32, 0x38, 0x62, + 0xc0, 0xbb, 0xa0, 0xc2, 0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x28, 0x85, 0xca, 0x6e, 0x02, + 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0xb8, 0x32, 0x7e, 0x2c, 0x91, 0x02, 0x24, 0x11, 0xf3, + 0x37, 0x0d, 0x5c, 0xbd, 0x5c, 0xc5, 0xde, 0x05, 0x65, 0x1c, 0xb8, 0xf2, 0xd8, 0xc3, 0x5a, 0xcd, + 0x8a, 0xbc, 0x6e, 0x3e, 0xd9, 0x89, 0x85, 0x28, 0xc1, 0x05, 0x79, 0x18, 0x8c, 0x68, 0xe9, 0x11, + 0x79, 0xe8, 0x92, 0xa1, 0x04, 0x87, 0xeb, 0x60, 0x76, 0xf8, 0x23, 0x8b, 0x64, 0x14, 0xa4, 0xc2, + 0xb5, 0x68, 0x50, 0x9b, 0x45, 0x69, 0x00, 0x8d, 0xf3, 0xcc, 0x5f, 0x74, 0xb0, 0xb4, 0x4b, 0xba, + 0x07, 0xaf, 0xe6, 0x2e, 0x78, 0x3a, 0x76, 0x17, 0x6c, 0x4c, 0x9f, 0xd8, 0xec, 0x28, 0x5f, 0xd9, + 0x7d, 0xf0, 0x83, 0x0e, 0x6e, 0x4e, 0x89, 0x09, 0x1e, 0x01, 0x48, 0xcf, 0x8d, 0x97, 0xca, 0xa3, + 0x3d, 0x35, 0x96, 0xf3, 0x53, 0xd9, 0xb8, 0x1e, 0x0d, 0x6a, 0x19, 0xd3, 0x8a, 0x32, 0x5c, 0xc0, + 0x6f, 0x35, 0xb0, 0xe8, 0x65, 0xdd, 0x54, 0x2a, 0xcd, 0xf5, 0xa9, 0xce, 0x33, 0xef, 0xb8, 0xc6, + 0x8d, 0x68, 0x50, 0xcb, 0xbe, 0xfe, 0x50, 0xb6, 0x2f, 0xf1, 0xca, 0x5c, 0x4f, 0xa5, 0x47, 0x0c, + 0xc8, 0xff, 0xd7, 0x57, 0x9f, 0x8c, 0xf5, 0xd5, 0xfa, 0x45, 0xfb, 0x2a, 0x15, 0xe4, 0xc4, 0xb6, + 0xfa, 0xec, 0x4c, 0x5b, 0xdd, 0xbd, 0x48, 0x5b, 0xa5, 0x0d, 0x4f, 0xef, 0xaa, 0x47, 0x60, 0x79, + 0x72, 0x40, 0x97, 0xbe, 0x9c, 0xcd, 0x9f, 0x74, 0xb0, 0xf0, 0xfa, 0x99, 0xbf, 0xcc, 0x58, 0xff, + 0x5e, 0x00, 0x4b, 0xaf, 0x47, 0x7a, 0xd2, 0xa2, 0x13, 0x32, 0x42, 0xd5, 0x33, 0x3e, 0x2a, 0xce, + 0x1e, 0x23, 0x14, 0x49, 0x04, 0x9a, 0x60, 0xa6, 0x1d, 0xbf, 0x6e, 0xf1, 0xfb, 0x03, 0x44, 0x82, + 0xd5, 0xd3, 0xa6, 0x10, 0xd8, 0x02, 0x45, 0x22, 0xf6, 0x56, 0xa3, 0xb8, 0x92, 0x5f, 0xad, 0xd4, + 0x3f, 0xfc, 0x2f, 0x9d, 0x61, 0xc9, 0xcd, 0x77, 0xdb, 0xe3, 0xf4, 0x38, 0x59, 0x27, 0xa4, 0x0c, + 0xc5, 0xc6, 0xe1, 0x9b, 0x20, 
0x1f, 0xba, 0x2d, 0xf5, 0xda, 0x57, 0x14, 0x25, 0xbf, 0xb7, 0xb3, + 0x85, 0x84, 0x7c, 0x19, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0xf3, 0x20, 0x7f, 0x48, 0x8e, 0xe3, 0x81, + 0x42, 0xe2, 0x13, 0x3e, 0x00, 0xc5, 0xbe, 0xd8, 0xab, 0x55, 0x7e, 0xdf, 0x99, 0x1a, 0x64, 0xb2, + 0x86, 0xa3, 0x58, 0xeb, 0xbe, 0xbe, 0xa1, 0x99, 0x7f, 0x68, 0xe0, 0xc6, 0xc4, 0xf6, 0x13, 0xeb, + 0x0e, 0xee, 0x76, 0xfd, 0x23, 0xd2, 0x92, 0x6e, 0x4b, 0xc9, 0xba, 0xb3, 0x19, 0x8b, 0xd1, 0x10, + 0x87, 0x6f, 0x83, 0x19, 0x4a, 0x30, 0xf3, 0x3d, 0xb5, 0x62, 0x8d, 0x3a, 0x17, 0x49, 0x29, 0x52, + 0x28, 0xdc, 0x04, 0x73, 0x44, 0xb8, 0x97, 0x71, 0x6d, 0x53, 0xea, 0x0f, 0x2b, 0xb5, 0xa4, 0x14, + 0xe6, 0xb6, 0xc7, 0x61, 0x74, 0x96, 0x2f, 0x5c, 0xb5, 0x88, 0xe7, 0x92, 0x96, 0xdc, 0xc1, 0x4a, + 0x89, 0xab, 0x2d, 0x29, 0x45, 0x0a, 0x35, 0xff, 0xd1, 0x81, 0x31, 0xe9, 0x6a, 0x83, 0x07, 0xc9, + 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2e, 0x34, 0x20, 0x42, 0xa3, 0xb1, 0xa8, 0xdc, + 0xce, 0xa6, 0xa5, 0xa9, 0xd5, 0x45, 0xfe, 0x42, 0x0a, 0xe6, 0xbd, 0xf1, 0x9d, 0x39, 0x5e, 0xaa, + 0x2a, 0xf5, 0xdb, 0x17, 0x1d, 0x07, 0xe9, 0xcd, 0x50, 0xde, 0xe6, 0xcf, 0x00, 0x0c, 0x9d, 0xb3, + 0x0f, 0xeb, 0x00, 0xb8, 0x9e, 0xe3, 0xf7, 0x82, 0x2e, 0xe1, 0x44, 0xa6, 0xb7, 0x94, 0xdc, 0x83, + 0x3b, 0x23, 0x04, 0xa5, 0x58, 0x59, 0x75, 0x29, 0x5c, 0xae, 0x2e, 0x8d, 0xd5, 0x93, 0xd3, 0x6a, + 0xee, 0xf9, 0x69, 0x35, 0xf7, 0xe2, 0xb4, 0x9a, 0x7b, 0x16, 0x55, 0xb5, 0x93, 0xa8, 0xaa, 0x3d, + 0x8f, 0xaa, 0xda, 0x8b, 0xa8, 0xaa, 0xfd, 0x15, 0x55, 0xb5, 0xef, 0xfe, 0xae, 0xe6, 0x9e, 0xea, + 0xfd, 0xb5, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x0e, 0xab, 0x82, 0x7c, 0x0f, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index e3ce0274..28c8d660 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -226,9 +226,16 @@ message SubjectAccessReviewSpec { // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. // +optional optional string reason = 2; @@ -263,3 +270,4 @@ message SubjectRulesReviewStatus { // +optional optional string evaluationError = 4; } + diff --git a/vendor/k8s.io/api/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go index d716eaa9..59311981 100644 --- a/vendor/k8s.io/api/authorization/v1/register.go +++ b/vendor/k8s.io/api/authorization/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
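
Aside (editor's note, not part of the vendored patch): in the generated.pb.go and generated.proto hunks above, `denied` is field 4 of SubjectAccessReviewStatus and uses the varint wire type, so its tag byte is (4<<3)|0 = 0x20 and the whole encoded field is exactly two bytes (tag plus a 0/1 value). That is why MarshalTo writes 0x20 followed by one byte and Size() adds 2. A small standalone sketch of that arithmetic:

```
// Illustrative sketch of the protobuf tag math; not part of the vendored patch.
package main

import "fmt"

func main() {
	const fieldNumber = 4    // the new "denied" field
	const wireTypeVarint = 0 // bools are encoded as varints

	tag := byte(fieldNumber<<3 | wireTypeVarint)
	fmt.Printf("tag byte: %#x\n", tag) // 0x20

	denied := true
	value := byte(0)
	if denied {
		value = 1
	}
	fmt.Println([]byte{tag, value}) // [32 1] -- two bytes, hence n += 2 in Size()
}
```
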
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &SelfSubjectRulesReview{}, diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index 23b5ae70..86b05c54 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -169,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 1e3f780d..85503660 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -148,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index 916974ff..a415b2b1 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
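
Aside (editor's illustration, not part of the vendored patch): the types.go comment above defines a three-way contract for SubjectAccessReviewStatus: Allowed, Denied, or neither set, in which case the authorizer has no opinion; Denied may not be true when Allowed is true. A hedged sketch of how a caller might fold that into a single decision — the `decision` helper is made up for illustration:

```
// Illustrative helper; not part of the vendored API.
package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
)

func decision(s authorizationv1.SubjectAccessReviewStatus) string {
	switch {
	case s.Allowed:
		return "allowed"
	case s.Denied:
		return "denied"
	default:
		return "no opinion" // neither allowed nor denied
	}
}

func main() {
	fmt.Println(decision(authorizationv1.SubjectAccessReviewStatus{Allowed: true}))
	fmt.Println(decision(authorizationv1.SubjectAccessReviewStatus{Denied: true}))
	fmt.Println(decision(authorizationv1.SubjectAccessReviewStatus{}))
}
```
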
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
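
Aside (editor's illustration, not part of the vendored patch): with RegisterDeepCopies removed from authorization/v1 as well, scheme setup reduces to the AddToScheme helper exported by register.go, and deep copies come from the static methods that follow. A minimal sketch, assuming the usual apimachinery runtime package:

```
// Illustrative sketch only; not part of the vendored patch.
package main

import (
	"fmt"

	authorizationv1 "k8s.io/api/authorization/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := authorizationv1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := authorizationv1.SchemeGroupVersion.WithKind("SubjectAccessReview")
	fmt.Println(scheme.Recognizes(gvk)) // expected: true
}
```
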
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 6809b811..ea4f802e 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index e1f49d46..9b1ad299 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -730,6 +730,14 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1015,6 +1023,7 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1205,6 +1214,7 @@ func (this *SubjectAccessReviewStatus) String() string { `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, `}`, }, "") return s @@ -3130,6 +3140,26 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } m.EvaluationError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3422,77 +3452,78 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1139 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0xfa, 0x47, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, - 0x05, 0xd1, 0xee, 0x92, 0x50, 0x48, 0x09, 0xf4, 0x10, 0xab, 0x11, 0x8a, 0xd4, 0x96, 0x6a, 0xa2, - 0xe4, 0x40, 0x25, 0x60, 0x76, 0x33, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x10, 0x87, - 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0x3b, 0x47, 0x2e, 0x48, 0x70, 0xca, 0xb1, 0xc7, 0x1c, 0x90, - 0x45, 0x96, 0x3f, 0x82, 0x2b, 0x9a, 0xd9, 0xb1, 0x77, 0x1d, 0xaf, 0xe3, 0x24, 0x87, 0xf6, 0xd2, - 0xdb, 0xce, 0xfb, 0xde, 0xf7, 0xde, 0x9b, 0x37, 0xef, 0xbd, 0x7d, 0x60, 0xa7, 0x7d, 0x9f, 0x19, - 0x8e, 0x67, 0xb6, 0x03, 0x8b, 0x50, 0x97, 0x70, 0xc2, 0xcc, 0x1e, 0x71, 0x0f, 0x3c, 0x6a, 0x2a, - 0x00, 0xfb, 0x8e, 0x89, 0x03, 0xde, 0xf2, 0xa8, 0xf3, 0x3d, 0xe6, 0x8e, 0xe7, 0x9a, 0xbd, 0x35, - 0x8b, 0x70, 0xbc, 0x66, 0x36, 0x89, 0x4b, 0x28, 0xe6, 0xe4, 0xc0, 0xf0, 0xa9, 0xc7, 0x3d, 0x58, - 0x8b, 0x18, 0x06, 0xf6, 0x1d, 0x63, 0x84, 0x61, 0x28, 0xc6, 0xca, 0xdd, 0xa6, 0xc3, 0x5b, 0x81, - 0x65, 0xd8, 0x5e, 0xd7, 0x6c, 
0x7a, 0x4d, 0xcf, 0x94, 0x44, 0x2b, 0x38, 0x94, 0x27, 0x79, 0x90, - 0x5f, 0x91, 0xc1, 0x95, 0x7b, 0x71, 0x08, 0x5d, 0x6c, 0xb7, 0x1c, 0x97, 0xd0, 0x63, 0xd3, 0x6f, - 0x37, 0x85, 0x80, 0x99, 0x5d, 0xc2, 0xb1, 0xd9, 0x1b, 0x0b, 0x63, 0xc5, 0x9c, 0xc4, 0xa2, 0x81, - 0xcb, 0x9d, 0x2e, 0x19, 0x23, 0x7c, 0x34, 0x8d, 0xc0, 0xec, 0x16, 0xe9, 0xe2, 0x31, 0xde, 0x07, - 0x93, 0x78, 0x01, 0x77, 0x3a, 0xa6, 0xe3, 0x72, 0xc6, 0xe9, 0x79, 0x52, 0x7d, 0x03, 0x80, 0xed, - 0xef, 0x38, 0xc5, 0xfb, 0xb8, 0x13, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x2e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0xfd, 0x6a, 0x61, 0x47, 0x08, 0x50, 0x24, 0xdf, 0x2c, 0xfe, 0xfc, - 0x4b, 0x35, 0xf3, 0xfc, 0xef, 0x5a, 0xa6, 0xfe, 0x47, 0x16, 0xe8, 0x8f, 0x3c, 0x1b, 0x77, 0x76, - 0x03, 0xeb, 0x1b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x73, 0xc8, 0x11, 0xfc, 0x1a, - 0x14, 0x45, 0x3a, 0x0e, 0x30, 0xc7, 0xba, 0x56, 0xd3, 0x56, 0xcb, 0xeb, 0xef, 0x1b, 0xf1, 0x6b, - 0x0c, 0xa3, 0x33, 0xfc, 0x76, 0x53, 0x08, 0x98, 0x21, 0xb4, 0x8d, 0xde, 0x9a, 0xf1, 0xb9, 0xb4, - 0xf5, 0x98, 0x70, 0xdc, 0x80, 0x27, 0xfd, 0x6a, 0x26, 0xec, 0x57, 0x41, 0x2c, 0x43, 0x43, 0xab, - 0xf0, 0x19, 0xc8, 0x33, 0x9f, 0xd8, 0x7a, 0x56, 0x5a, 0xff, 0xd8, 0x98, 0xf6, 0xd6, 0x46, 0x4a, - 0x98, 0xbb, 0x3e, 0xb1, 0x1b, 0x37, 0x94, 0x9b, 0xbc, 0x38, 0x21, 0x69, 0x14, 0xda, 0x60, 0x86, - 0x71, 0xcc, 0x03, 0xa6, 0xe7, 0xa4, 0xf9, 0x4f, 0xae, 0x67, 0x5e, 0x9a, 0x68, 0xbc, 0xa1, 0x1c, - 0xcc, 0x44, 0x67, 0xa4, 0x4c, 0xd7, 0x9f, 0x81, 0xa5, 0x27, 0x9e, 0x8b, 0x08, 0xf3, 0x02, 0x6a, - 0x93, 0x2d, 0xce, 0xa9, 0x63, 0x05, 0x9c, 0x30, 0x58, 0x03, 0x79, 0x1f, 0xf3, 0x96, 0x4c, 0x5c, - 0x29, 0x8e, 0xef, 0x29, 0xe6, 0x2d, 0x24, 0x11, 0xa1, 0xd1, 0x23, 0xd4, 0x92, 0x97, 0x4f, 0x68, - 0xec, 0x13, 0x6a, 0x21, 0x89, 0xd4, 0xbf, 0x05, 0xf3, 0x09, 0xe3, 0x28, 0xe8, 0xc8, 0xb7, 0x15, - 0xd0, 0xc8, 0xdb, 0x0a, 0x06, 0x43, 0x91, 0x1c, 0x3e, 0x00, 0xf3, 0x6e, 0xcc, 0xd9, 0x43, 0x8f, - 0x98, 0x9e, 0x95, 0xaa, 0x8b, 0x61, 0xbf, 0x9a, 0x34, 0x27, 0x20, 0x74, 0x5e, 0x57, 0x14, 0x04, - 0x4c, 0xb9, 0x8d, 0x09, 0x4a, 0x2e, 0xee, 0x12, 0xe6, 0x63, 0x9b, 0xa8, 0x2b, 0xdd, 0x54, 0x01, - 0x97, 0x9e, 0x0c, 0x00, 0x14, 0xeb, 0x4c, 0xbf, 0x1c, 0x7c, 0x1b, 0x14, 0x9a, 0xd4, 0x0b, 0x7c, - 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0x52, 0x29, 0x7c, 0x26, 0x84, 0x28, 0xc2, 0xe0, 0xbb, 0x60, 0xb6, - 0x47, 0x28, 0x73, 0x3c, 0x57, 0xcf, 0x4b, 0xb5, 0x79, 0xa5, 0x36, 0xbb, 0x1f, 0x89, 0xd1, 0x00, - 0x87, 0x77, 0x40, 0x91, 0xaa, 0xc0, 0xf5, 0x82, 0xd4, 0x5d, 0x50, 0xba, 0xc5, 0x61, 0x06, 0x87, - 0x1a, 0xf0, 0x43, 0x50, 0x66, 0x81, 0x35, 0x24, 0xcc, 0x48, 0xc2, 0xa2, 0x22, 0x94, 0x77, 0x63, - 0x08, 0x25, 0xf5, 0xc4, 0xb5, 0xc4, 0x1d, 0xf5, 0xd9, 0xd1, 0x6b, 0x89, 0x14, 0x20, 0x89, 0xd4, - 0xff, 0xd2, 0xc0, 0x8d, 0xab, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x5e, 0x7b, 0xf0, 0x56, - 0x73, 0x22, 0xaf, 0x5b, 0x4f, 0x77, 0x22, 0x21, 0x8a, 0x71, 0xa1, 0x3c, 0x08, 0x46, 0xd4, 0xf5, - 0x50, 0x79, 0xe0, 0x92, 0xa1, 0x18, 0x87, 0x1b, 0x60, 0x6e, 0x70, 0x90, 0x8f, 0xa4, 0xe7, 0x25, - 0xe1, 0x66, 0xd8, 0xaf, 0xce, 0xa1, 0x24, 0x80, 0x46, 0xf5, 0xea, 0x7f, 0x66, 0xc1, 0xf2, 0x2e, - 0xe9, 0x1c, 0xbe, 0x9a, 0xa9, 0xf0, 0xd5, 0xc8, 0x54, 0x78, 0x70, 0x89, 0xb6, 0x4d, 0x0f, 0xf5, - 0xd5, 0x4e, 0x86, 0x5f, 0xb3, 0xe0, 0xcd, 0x0b, 0x02, 0x83, 0x3f, 0x00, 0x48, 0xc7, 0x1a, 0x4d, - 0x65, 0xf4, 0xde, 0xf4, 0x80, 0xc6, 0x9b, 0xb4, 0x71, 0x2b, 0xec, 0x57, 0x53, 0x9a, 0x17, 0xa5, - 0xf8, 0x81, 0x3f, 0x6a, 0x60, 0xc9, 0x4d, 0x1b, 0x5c, 0x2a, 0xeb, 0x1b, 0xd3, 0x23, 0x48, 0x9d, - 0x7b, 0x8d, 0xdb, 0x61, 0xbf, 0x9a, 0x3e, 0x12, 0x51, 
0xba, 0x43, 0x31, 0x72, 0x6e, 0x25, 0x12, - 0x25, 0x9a, 0xe6, 0xe5, 0xd5, 0xda, 0x97, 0x23, 0xb5, 0xf6, 0xe9, 0x95, 0x6a, 0x2d, 0x11, 0xe9, - 0xc4, 0x52, 0xb3, 0xce, 0x95, 0xda, 0xe6, 0xa5, 0x4b, 0x2d, 0x69, 0xfd, 0xe2, 0x4a, 0x7b, 0x0c, - 0x56, 0x26, 0x47, 0x75, 0xe5, 0xd1, 0x5d, 0xff, 0x3d, 0x0b, 0x16, 0x5f, 0xaf, 0x03, 0xd7, 0x6b, - 0xfa, 0xd3, 0x3c, 0x58, 0x7e, 0xdd, 0xf0, 0x17, 0x37, 0xbc, 0xf8, 0x89, 0x06, 0x8c, 0x50, 0xf5, - 0xe3, 0x1f, 0xbe, 0xd5, 0x1e, 0x23, 0x14, 0x49, 0x04, 0xd6, 0x06, 0xbb, 0x41, 0xf4, 0xc3, 0x02, - 0x22, 0xd3, 0xea, 0x5f, 0xa8, 0x16, 0x03, 0x07, 0x14, 0x88, 0xd8, 0x78, 0xf5, 0x42, 0x2d, 0xb7, - 0x5a, 0x5e, 0x7f, 0x78, 0xed, 0x5a, 0x31, 0xe4, 0xe2, 0xbc, 0xed, 0x72, 0x7a, 0x1c, 0xef, 0x20, - 0x52, 0x86, 0x22, 0x0f, 0xf0, 0x2d, 0x90, 0x0b, 0x9c, 0x03, 0xb5, 0x22, 0x94, 0x95, 0x4a, 0x6e, - 0x6f, 0xe7, 0x21, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, 0xda, 0xe4, - 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0x7a, 0x62, 0x2d, 0x57, 0x79, 0xbe, 0x33, 0x3d, - 0xd2, 0x78, 0x95, 0x47, 0x11, 0x75, 0x33, 0x7b, 0x5f, 0xab, 0xff, 0xa6, 0x81, 0xdb, 0x13, 0x0b, - 0x52, 0x2c, 0x4a, 0xb8, 0xd3, 0xf1, 0x8e, 0xc8, 0x81, 0xf4, 0x5d, 0x8c, 0x17, 0xa5, 0xad, 0x48, - 0x8c, 0x06, 0x38, 0x7c, 0x07, 0xcc, 0x50, 0x82, 0x99, 0xe7, 0xaa, 0xe5, 0x6c, 0x58, 0xcb, 0x48, - 0x4a, 0x91, 0x42, 0xe1, 0x16, 0x98, 0x27, 0xc2, 0xbd, 0x0c, 0x6e, 0x9b, 0x52, 0x6f, 0xf0, 0x62, - 0xcb, 0x8a, 0x30, 0xbf, 0x3d, 0x0a, 0xa3, 0xf3, 0xfa, 0xf5, 0xff, 0xb2, 0x40, 0x9f, 0x34, 0xce, - 0x60, 0x3b, 0xde, 0x4e, 0x24, 0x28, 0x17, 0xa4, 0xf2, 0xba, 0x71, 0xf9, 0x56, 0x10, 0xb4, 0xc6, - 0x92, 0x8a, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x34, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, 0xba, 0x4a, - 0x47, 0xbb, 0x56, 0x79, 0x7d, 0xed, 0x4a, 0x85, 0x2f, 0x5d, 0xea, 0xca, 0xe5, 0xc2, 0x39, 0x80, - 0xa1, 0x31, 0x27, 0x70, 0x1d, 0x00, 0xc7, 0xb5, 0xbd, 0xae, 0xdf, 0x21, 0x9c, 0xc8, 0x04, 0x16, - 0xe3, 0x29, 0xb8, 0x33, 0x44, 0x50, 0x42, 0x2b, 0x2d, 0xf3, 0xf9, 0xab, 0x65, 0xbe, 0x71, 0xf7, - 0xe4, 0xac, 0x92, 0x79, 0x71, 0x56, 0xc9, 0x9c, 0x9e, 0x55, 0x32, 0xcf, 0xc3, 0x8a, 0x76, 0x12, - 0x56, 0xb4, 0x17, 0x61, 0x45, 0x3b, 0x0d, 0x2b, 0xda, 0x3f, 0x61, 0x45, 0xfb, 0xe9, 0xdf, 0x4a, - 0xe6, 0x8b, 0x59, 0x75, 0xc3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x29, 0xa9, 0x9d, 0x7c, 0xb1, - 0x0f, 0x00, 0x00, + // 1154 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xf7, 0xfa, 0x25, 0xb1, 0xc7, 0xcd, 0x3f, 0xe9, 0x44, 0x69, 0xb6, 0xf9, 0x0b, 0xdb, 0x32, + 0x12, 0x0a, 0xa2, 0xdd, 0x25, 0xa1, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, + 0x44, 0xc9, 0x81, 0x4a, 0xc0, 0x78, 0x3d, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x10, + 0x87, 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0xef, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, + 0x64, 0x91, 0xe5, 0x43, 0x70, 0x45, 0x33, 0x3b, 0xf6, 0xae, 0xe3, 0x75, 0x1c, 0xe7, 0x40, 0x2f, + 0xbd, 0xed, 0x3c, 0xbf, 0xe7, 0x6d, 0x9e, 0x97, 0xd9, 0x1f, 0xd8, 0x6f, 0x3f, 0x64, 0x86, 0xed, + 0x9a, 0x6d, 0xbf, 0x4e, 0xa8, 0x43, 0x38, 0x61, 0x66, 0x8f, 0x38, 0x0d, 0x97, 0x9a, 0x0a, 0xc0, + 0x9e, 0x6d, 0x62, 0x9f, 0xb7, 0x5c, 0x6a, 0x7f, 0x8b, 0xb9, 0xed, 0x3a, 0x66, 0x6f, 0xa3, 0x4e, + 0x38, 0xde, 0x30, 0x9b, 0xc4, 0x21, 0x14, 0x73, 0xd2, 0x30, 0x3c, 0xea, 0x72, 0x17, 0x56, 0x42, + 0x0b, 0x03, 0x7b, 0xb6, 0x31, 0x62, 0x61, 0x28, 0x8b, 0xb5, 0xfb, 0x4d, 0x9b, 0xb7, 0xfc, 0xba, + 0x61, 0xb9, 0x5d, 0xb3, 0xe9, 0x36, 0x5d, 0x53, 0x1a, 0xd6, 0xfd, 0x63, 0x79, 0x92, 0x07, 0xf9, + 0x15, 0x3a, 
0x5c, 0x7b, 0x10, 0xa5, 0xd0, 0xc5, 0x56, 0xcb, 0x76, 0x08, 0x3d, 0x35, 0xbd, 0x76, + 0x53, 0x08, 0x98, 0xd9, 0x25, 0x1c, 0x9b, 0xbd, 0xb1, 0x34, 0xd6, 0xcc, 0x49, 0x56, 0xd4, 0x77, + 0xb8, 0xdd, 0x25, 0x63, 0x06, 0x1f, 0x4c, 0x33, 0x60, 0x56, 0x8b, 0x74, 0xf1, 0x98, 0xdd, 0x7b, + 0x93, 0xec, 0x7c, 0x6e, 0x77, 0x4c, 0xdb, 0xe1, 0x8c, 0xd3, 0xcb, 0x46, 0xd5, 0x2d, 0x00, 0xf6, + 0xbe, 0xe1, 0x14, 0x1f, 0xe1, 0x8e, 0x4f, 0x60, 0x19, 0xe4, 0x6c, 0x4e, 0xba, 0x4c, 0xd7, 0x2a, + 0x99, 0xf5, 0x42, 0xad, 0x10, 0xf4, 0xcb, 0xb9, 0x7d, 0x21, 0x40, 0xa1, 0x7c, 0x3b, 0xff, 0xe3, + 0x4f, 0xe5, 0xd4, 0x8b, 0x3f, 0x2b, 0xa9, 0xea, 0xaf, 0x69, 0xa0, 0x3f, 0x76, 0x2d, 0xdc, 0x39, + 0xf0, 0xeb, 0x5f, 0x11, 0x8b, 0xef, 0x58, 0x16, 0x61, 0x0c, 0x91, 0x9e, 0x4d, 0x4e, 0xe0, 0x97, + 0x20, 0x2f, 0xca, 0xd1, 0xc0, 0x1c, 0xeb, 0x5a, 0x45, 0x5b, 0x2f, 0x6e, 0xbe, 0x6b, 0x44, 0xdd, + 0x18, 0x66, 0x67, 0x78, 0xed, 0xa6, 0x10, 0x30, 0x43, 0x68, 0x1b, 0xbd, 0x0d, 0xe3, 0x53, 0xe9, + 0xeb, 0x09, 0xe1, 0xb8, 0x06, 0xcf, 0xfa, 0xe5, 0x54, 0xd0, 0x2f, 0x83, 0x48, 0x86, 0x86, 0x5e, + 0xe1, 0x73, 0x90, 0x65, 0x1e, 0xb1, 0xf4, 0xb4, 0xf4, 0xfe, 0xa1, 0x31, 0xad, 0xd7, 0x46, 0x42, + 0x9a, 0x07, 0x1e, 0xb1, 0x6a, 0xb7, 0x54, 0x98, 0xac, 0x38, 0x21, 0xe9, 0x14, 0x5a, 0x60, 0x8e, + 0x71, 0xcc, 0x7d, 0xa6, 0x67, 0xa4, 0xfb, 0x8f, 0x6e, 0xe6, 0x5e, 0xba, 0xa8, 0xfd, 0x4f, 0x05, + 0x98, 0x0b, 0xcf, 0x48, 0xb9, 0xae, 0x3e, 0x07, 0x2b, 0x4f, 0x5d, 0x07, 0x11, 0xe6, 0xfa, 0xd4, + 0x22, 0x3b, 0x9c, 0x53, 0xbb, 0xee, 0x73, 0xc2, 0x60, 0x05, 0x64, 0x3d, 0xcc, 0x5b, 0xb2, 0x70, + 0x85, 0x28, 0xbf, 0x67, 0x98, 0xb7, 0x90, 0x44, 0x84, 0x46, 0x8f, 0xd0, 0xba, 0xbc, 0x7c, 0x4c, + 0xe3, 0x88, 0xd0, 0x3a, 0x92, 0x48, 0xf5, 0x6b, 0xb0, 0x18, 0x73, 0x8e, 0xfc, 0x8e, 0xec, 0xad, + 0x80, 0x46, 0x7a, 0x2b, 0x2c, 0x18, 0x0a, 0xe5, 0xf0, 0x11, 0x58, 0x74, 0x22, 0x9b, 0x43, 0xf4, + 0x98, 0xe9, 0x69, 0xa9, 0xba, 0x1c, 0xf4, 0xcb, 0x71, 0x77, 0x02, 0x42, 0x97, 0x75, 0xc5, 0x40, + 0xc0, 0x84, 0xdb, 0x98, 0xa0, 0xe0, 0xe0, 0x2e, 0x61, 0x1e, 0xb6, 0x88, 0xba, 0xd2, 0x6d, 0x95, + 0x70, 0xe1, 0xe9, 0x00, 0x40, 0x91, 0xce, 0xf4, 0xcb, 0xc1, 0x37, 0x41, 0xae, 0x49, 0x5d, 0xdf, + 0x93, 0xdd, 0x29, 0xd4, 0x16, 0x94, 0x4a, 0xee, 0x13, 0x21, 0x44, 0x21, 0x06, 0xdf, 0x06, 0xf3, + 0x3d, 0x42, 0x99, 0xed, 0x3a, 0x7a, 0x56, 0xaa, 0x2d, 0x2a, 0xb5, 0xf9, 0xa3, 0x50, 0x8c, 0x06, + 0x38, 0xbc, 0x07, 0xf2, 0x54, 0x25, 0xae, 0xe7, 0xa4, 0xee, 0x92, 0xd2, 0xcd, 0x0f, 0x2b, 0x38, + 0xd4, 0x80, 0xef, 0x83, 0x22, 0xf3, 0xeb, 0x43, 0x83, 0x39, 0x69, 0xb0, 0xac, 0x0c, 0x8a, 0x07, + 0x11, 0x84, 0xe2, 0x7a, 0xe2, 0x5a, 0xe2, 0x8e, 0xfa, 0xfc, 0xe8, 0xb5, 0x44, 0x09, 0x90, 0x44, + 0xaa, 0xbf, 0x6b, 0xe0, 0xd6, 0x6c, 0x1d, 0x7b, 0x07, 0x14, 0xb0, 0x67, 0xcb, 0x6b, 0x0f, 0x7a, + 0xb5, 0x20, 0xea, 0xba, 0xf3, 0x6c, 0x3f, 0x14, 0xa2, 0x08, 0x17, 0xca, 0x83, 0x64, 0xc4, 0x5c, + 0x0f, 0x95, 0x07, 0x21, 0x19, 0x8a, 0x70, 0xb8, 0x05, 0x16, 0x06, 0x07, 0xd9, 0x24, 0x3d, 0x2b, + 0x0d, 0x6e, 0x07, 0xfd, 0xf2, 0x02, 0x8a, 0x03, 0x68, 0x54, 0xaf, 0xfa, 0x5b, 0x1a, 0xac, 0x1e, + 0x90, 0xce, 0xf1, 0xab, 0x79, 0x15, 0xbe, 0x18, 0x79, 0x15, 0x1e, 0x5d, 0x63, 0x6d, 0x93, 0x53, + 0x7d, 0xb5, 0x2f, 0xc3, 0xcf, 0x69, 0xf0, 0xff, 0x2b, 0x12, 0x83, 0xdf, 0x01, 0x48, 0xc7, 0x16, + 0x4d, 0x55, 0xf4, 0xc1, 0xf4, 0x84, 0xc6, 0x97, 0xb4, 0x76, 0x27, 0xe8, 0x97, 0x13, 0x96, 0x17, + 0x25, 0xc4, 0x81, 0xdf, 0x6b, 0x60, 0xc5, 0x49, 0x7a, 0xb8, 0x54, 0xd5, 0xb7, 0xa6, 0x67, 0x90, + 0xf8, 0xee, 0xd5, 0xee, 0x06, 0xfd, 0x72, 0xf2, 0x93, 0x88, 0x92, 0x03, 0x8a, 0x27, 0xe7, 0x4e, + 0xac, 0x50, 0x62, 0x69, 0xfe, 0xbb, 
0x59, 0xfb, 0x7c, 0x64, 0xd6, 0x3e, 0x9e, 0x69, 0xd6, 0x62, + 0x99, 0x4e, 0x1c, 0xb5, 0xfa, 0xa5, 0x51, 0xdb, 0xbe, 0xf6, 0xa8, 0xc5, 0xbd, 0x5f, 0x3d, 0x69, + 0x4f, 0xc0, 0xda, 0xe4, 0xac, 0x66, 0x7e, 0xba, 0xab, 0xbf, 0xa4, 0xc1, 0xf2, 0x6b, 0x3a, 0x70, + 0xb3, 0xa5, 0x3f, 0xcf, 0x82, 0xd5, 0xd7, 0x0b, 0x7f, 0xf5, 0xc2, 0x8b, 0x9f, 0xa8, 0xcf, 0x08, + 0x55, 0x3f, 0xfe, 0x61, 0xaf, 0x0e, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x32, 0xe0, 0x06, 0xe1, 0x0f, + 0x0b, 0x88, 0x4a, 0xab, 0x7f, 0xa1, 0x22, 0x06, 0x36, 0xc8, 0x11, 0xc1, 0x78, 0xf5, 0x5c, 0x25, + 0xb3, 0x5e, 0xdc, 0xdc, 0xbd, 0xf1, 0xac, 0x18, 0x92, 0x38, 0xef, 0x39, 0x9c, 0x9e, 0x46, 0x1c, + 0x44, 0xca, 0x50, 0x18, 0x01, 0xbe, 0x01, 0x32, 0xbe, 0xdd, 0x50, 0x14, 0xa1, 0xa8, 0x54, 0x32, + 0x87, 0xfb, 0xbb, 0x48, 0xc8, 0xd7, 0x8e, 0x15, 0xf7, 0x96, 0x2e, 0xe0, 0x12, 0xc8, 0xb4, 0xc9, + 0x69, 0xb8, 0x67, 0x48, 0x7c, 0xc2, 0x1a, 0xc8, 0xf5, 0x04, 0x2d, 0x57, 0x75, 0xbe, 0x37, 0x3d, + 0xd3, 0x88, 0xca, 0xa3, 0xd0, 0x74, 0x3b, 0xfd, 0x50, 0xab, 0xfe, 0xa1, 0x81, 0xbb, 0x13, 0x07, + 0x52, 0x10, 0x25, 0xdc, 0xe9, 0xb8, 0x27, 0xa4, 0x21, 0x63, 0xe7, 0x23, 0xa2, 0xb4, 0x13, 0x8a, + 0xd1, 0x00, 0x87, 0x6f, 0x81, 0x39, 0x4a, 0x30, 0x73, 0x1d, 0x45, 0xce, 0x86, 0xb3, 0x8c, 0xa4, + 0x14, 0x29, 0x14, 0xee, 0x80, 0x45, 0x22, 0xc2, 0xcb, 0xe4, 0xf6, 0x28, 0x75, 0x07, 0x1d, 0x5b, + 0x55, 0x06, 0x8b, 0x7b, 0xa3, 0x30, 0xba, 0xac, 0x2f, 0x42, 0x35, 0x88, 0x63, 0x93, 0x86, 0x64, + 0x6f, 0xf9, 0x28, 0xd4, 0xae, 0x94, 0x22, 0x85, 0x56, 0xff, 0x49, 0x03, 0x7d, 0xd2, 0xb3, 0x07, + 0xdb, 0x11, 0x8b, 0x91, 0xa0, 0x24, 0x52, 0xc5, 0x4d, 0xe3, 0xfa, 0x2b, 0x23, 0xcc, 0x6a, 0x2b, + 0x2a, 0xf6, 0x42, 0x5c, 0x1a, 0x63, 0x3e, 0xf2, 0x08, 0x4f, 0xc0, 0x92, 0x33, 0x4a, 0xb9, 0x43, + 0x4e, 0x56, 0xdc, 0xdc, 0x98, 0x69, 0x41, 0x64, 0x48, 0x5d, 0x85, 0x5c, 0xba, 0x04, 0x30, 0x34, + 0x16, 0x04, 0x6e, 0x02, 0x60, 0x3b, 0x96, 0xdb, 0xf5, 0x3a, 0x84, 0x13, 0x59, 0xe8, 0x7c, 0xf4, + 0x5a, 0xee, 0x0f, 0x11, 0x14, 0xd3, 0x4a, 0xea, 0x50, 0x76, 0xb6, 0x0e, 0xd5, 0xee, 0x9f, 0x5d, + 0x94, 0x52, 0x2f, 0x2f, 0x4a, 0xa9, 0xf3, 0x8b, 0x52, 0xea, 0x45, 0x50, 0xd2, 0xce, 0x82, 0x92, + 0xf6, 0x32, 0x28, 0x69, 0xe7, 0x41, 0x49, 0xfb, 0x2b, 0x28, 0x69, 0x3f, 0xfc, 0x5d, 0x4a, 0x7d, + 0x36, 0xaf, 0x6e, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xb3, 0x5e, 0x05, 0xd9, 0x0f, + 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 1394725d..59df0b6d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -226,9 +226,16 @@ message SubjectAccessReviewSpec { // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. 
// +optional optional string reason = 2; @@ -263,3 +270,4 @@ message SubjectRulesReviewStatus { // +optional optional string evaluationError = 4; } + diff --git a/vendor/k8s.io/api/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go index d8116d5a..84255dd6 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/register.go +++ b/vendor/k8s.io/api/authorization/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &SelfSubjectRulesReview{}, diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index f62f5956..618ff8c0 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -169,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 544c2fa4..2371b21c 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -148,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index aeb77ddb..fcb0067a 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 1689adb5..9c3be845 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 33b96f46..e41e6274 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -318,3 +318,4 @@ message ScaleStatus { // +optional optional string selector = 2; } + diff --git a/vendor/k8s.io/api/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go index 521c0e4a..8dfe361e 100644 --- a/vendor/k8s.io/api/autoscaling/v1/register.go +++ b/vendor/k8s.io/api/autoscaling/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 20848f20..de4e6daa 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -23,92 +23,9 @@ package v1 import ( resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: 
reflect.TypeOf(&ResourceMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 689f369b..da9789e5 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 6d970cdc..de3d2665 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -298,3 +298,4 @@ message ResourceMetricStatus { // It will always be set, regardless of the corresponding metric specification. optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } + diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go index 9025b421..12d697f0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/register.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index b538e5b4..0bb3dd30 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -23,80 +23,9 @@ package v2beta1 import ( resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: 
reflect.TypeOf(&ResourceMetricStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index 52a1d903..04491807 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 43d8acc7..0f43d2fd 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -170,3 +170,4 @@ message JobStatus { // +optional optional int32 failed = 6; } + diff --git a/vendor/k8s.io/api/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go index 2cdfc98c..32fa51f0 100644 --- a/vendor/k8s.io/api/batch/v1/register.go +++ b/vendor/k8s.io/api/batch/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Job{}, diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 8738bd5d..fa9ff4f9 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -22,44 +22,9 @@ package v1 import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Job).DeepCopyInto(out.(*Job)) - return nil - }, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobCondition).DeepCopyInto(out.(*JobCondition)) - return nil - }, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobList).DeepCopyInto(out.(*JobList)) - return nil - }, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobSpec).DeepCopyInto(out.(*JobSpec)) - return nil - }, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobStatus).DeepCopyInto(out.(*JobStatus)) - return nil - }, InType: reflect.TypeOf(&JobStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Job) DeepCopyInto(out *Job) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 653ebe04..43020ed0 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index d98d5978..11fa4e2a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -71,7 +71,10 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; @@ -132,3 +135,4 @@ message JobTemplateSpec { // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } + diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 569817d3..226de49f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index ad13bba2..cb5c9bad 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -100,7 +100,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index f4c74e43..3b53ac08 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. 
Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c730ca98..0f8562f8 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -23,48 +23,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index 288275d6..f4ed01ad 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 14ad77b4..cc90d419 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -71,7 +71,10 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; @@ -130,3 +133,4 @@ message JobTemplateSpec { // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } + diff --git a/vendor/k8s.io/api/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go index 4b02c0f4..ac7fa508 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/register.go +++ b/vendor/k8s.io/api/batch/v2alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index 451df40e..cccff94f 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -100,7 +100,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index 2b3ed4c5..d166b807 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. 
Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index b572f9ca..5474235d 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -23,48 +23,9 @@ package v2alpha1 import ( v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index d98ee7c3..fb23aadb 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 88f51be1..e90f4f9c 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -119,3 +119,4 @@ message ExtraValue { repeated string items = 1; } + diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go index b5ba3d7e..b4f3af9b 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/register.go +++ b/vendor/k8s.io/api/certificates/v1beta1/register.go @@ -46,7 +46,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CertificateSigningRequest{}, diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 6ecc1311..de0715db 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -21,44 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequest).DeepCopyInto(out.(*CertificateSigningRequest)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestCondition).DeepCopyInto(out.(*CertificateSigningRequestCondition)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestList).DeepCopyInto(out.(*CertificateSigningRequestList)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestSpec).DeepCopyInto(out.(*CertificateSigningRequestSpec)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestStatus).DeepCopyInto(out.(*CertificateSigningRequestStatus)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { *out = *in diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index e623913f..de4e3cee 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -45,12 +45,6 @@ const ( // to one container of a pod. SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - // This field is deprecated in favor of ControllerRef (see #44407). - // TODO(#50720): Remove this field in v1.9. - CreatedByAnnotation = "kubernetes.io/created-by" - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index a31af9ea..96994c62 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 1cd0bdc7..c2fe6bc5 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -33,6 +33,7 @@ limitations under the License. AzureFilePersistentVolumeSource AzureFileVolumeSource Binding + CSIPersistentVolumeSource Capabilities CephFSPersistentVolumeSource CephFSVolumeSource @@ -71,6 +72,7 @@ limitations under the License. 
EnvVarSource Event EventList + EventSeries EventSource ExecAction FCVolumeSource @@ -84,6 +86,7 @@ limitations under the License. Handler HostAlias HostPathVolumeSource + ISCSIPersistentVolumeSource ISCSIVolumeSource KeyToPath Lifecycle @@ -138,6 +141,8 @@ limitations under the License. PodAntiAffinity PodAttachOptions PodCondition + PodDNSConfig + PodDNSConfigOption PodExecOptions PodList PodLogOptions @@ -200,6 +205,7 @@ limitations under the License. Taint Toleration Volume + VolumeDevice VolumeMount VolumeProjection VolumeSource @@ -272,738 +278,766 @@ func (m *Binding) Reset() { *m = Binding{} } func (*Binding) ProtoMessage() {} func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{10} } func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptorGenerated, []int{28} } func (m 
*ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{35} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{39} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func 
(*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{53} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{61} +} func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{62} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{86} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{87} + return fileDescriptorGenerated, []int{90} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{97} + return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} 
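The wholesale renumbering in this hunk is mechanical: in a gogo/protobuf generated file, the `[]int{N}` returned by each `Descriptor()` method is the message's position inside `fileDescriptorGenerated`, so inserting new messages such as `EventSeries`, `ISCSIPersistentVolumeSource`, `CSIPersistentVolumeSource`, `PodDNSConfig`, `PodDNSConfigOption`, and `VolumeDevice` shifts every later index. As a rough sketch only (not part of the generated file; the field layout is inferred from the new `MarshalTo` bodies later in this hunk, and the authoritative definitions live in the regenerated `types.go`), two of the newly registered messages look roughly like this:

```
// Sketch only: shapes inferred from the MarshalTo bodies added in this patch
// (tag bytes 0x0a/0x12/0x18 => fields 1-3 of CSIPersistentVolumeSource,
// 0x08/0x12/0x1a => fields 1-3 of EventSeries). Exact Go types are assumptions.
package v1sketch

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// CSIPersistentVolumeSource points a PersistentVolume at an out-of-tree CSI driver.
type CSIPersistentVolumeSource struct {
	Driver       string // field 1: name of the CSI driver to use
	VolumeHandle string // field 2: driver-specific identity of the volume
	ReadOnly     bool   // field 3: publish the volume read-only if true
}

// EventSeries aggregates repeated occurrences of the same Event.
type EventSeries struct {
	Count            int32            // field 1: number of occurrences observed
	LastObservedTime metav1.MicroTime // field 2: time of the most recent occurrence
	State            string           // field 3: state of the series (assumed string alias upstream)
}
```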
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{98} + return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{99} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{100} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{108} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} + return fileDescriptorGenerated, []int{109} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func 
(*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{125} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{129} + return fileDescriptorGenerated, []int{134} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{132} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{133} + return fileDescriptorGenerated, []int{138} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{137} + return fileDescriptorGenerated, []int{142} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} + return fileDescriptorGenerated, []int{143} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{139} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{140} + return fileDescriptorGenerated, []int{145} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } 
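The other new stubs registered in this hunk cover the pod DNS overrides and raw block device support. Only their `Descriptor()` and `RegisterType` entries are visible here, so the field layouts below are a sketch taken from the upstream core/v1 API this vendor bump targets and should be read as assumptions, not as the generated definitions; the one detail that is visible in this patch is that `Container` gains a `VolumeDevices` slice written as field 21 (two-byte key `0xaa 0x01`) in its `MarshalTo` hunk below.

```
// Sketch only: layouts follow the upstream core/v1 API assumed by this vendor
// bump; treat the details as assumptions, since this hunk shows only the
// Descriptor and RegisterType stubs for these messages.
package v1dnssketch

// PodDNSConfig lets a pod append to or override the DNS settings derived
// from its dnsPolicy.
type PodDNSConfig struct {
	Nameservers []string             // extra nameserver IPs
	Searches    []string             // extra search domains
	Options     []PodDNSConfigOption // extra resolver options
}

// PodDNSConfigOption is a single resolver option such as ndots:5.
type PodDNSConfigOption struct {
	Name  string
	Value *string // optional value; nil for flag-style options
}

// VolumeDevice maps a raw block PersistentVolumeClaim into a container.
type VolumeDevice struct {
	Name       string // must match a persistentVolumeClaim volume in the pod
	DevicePath string // device path inside the container
}
```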
func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{148} + return fileDescriptorGenerated, []int{153} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*SecretReference) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{167} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{168} + return fileDescriptorGenerated, []int{173} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{178} + return fileDescriptorGenerated, []int{184} } 
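Every hard-coded byte written into `dAtA` by the `MarshalTo` hunks that follow (`0x0a`, `0x12`, `0x52`, `0x62`, ...) is a protobuf key, computed as `fieldNumber<<3 | wireType`, and `encodeVarintGenerated` appends lengths and integer values as base-128 varints. A minimal standalone sketch of the same encoding, separate from the generated helpers:

```
// Minimal sketch of the wire format used by the generated MarshalTo bodies.
// key() only covers field numbers up to 15, which fit in one byte; larger
// fields such as Container.VolumeDevices (field 21) need a multi-byte key,
// which is why that hunk writes 0xaa followed by 0x01.
package main

import "fmt"

// key builds a single-byte protobuf key: fieldNumber<<3 | wireType
// (wire type 0 = varint, 2 = length-delimited).
func key(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

// appendVarint mirrors what encodeVarintGenerated does, minus the explicit
// offset bookkeeping used by the generated code.
func appendVarint(dst []byte, v uint64) []byte {
	for v >= 0x80 {
		dst = append(dst, byte(v)|0x80) // low 7 bits plus continuation bit
		v >>= 7
	}
	return append(dst, byte(v))
}

func main() {
	fmt.Printf("%#x\n", key(1, 2))              // 0xa:  field 1, length-delimited (e.g. a Driver string)
	fmt.Printf("%#x\n", key(3, 0))              // 0x18: field 3, varint (e.g. a ReadOnly bool)
	fmt.Printf("%#x\n", key(10, 2))             // 0x52: field 10, embedded message (e.g. a SecretRef)
	fmt.Printf("% x\n", appendVarint(nil, 300)) // a 300-byte length prefix becomes "ac 02"
}
```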
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{179} + return fileDescriptorGenerated, []int{185} } func init() { @@ -1015,6 +1049,7 @@ func init() { proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") @@ -1053,6 +1088,7 @@ func init() { proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") @@ -1066,6 +1102,7 @@ func init() { proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") @@ -1120,6 +1157,8 @@ func init() { proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") @@ -1182,6 +1221,7 @@ func init() { proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") @@ -1491,6 +1531,40 @@ func (m *Binding) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i += copy(dAtA[i:], m.VolumeHandle) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2294,6 +2368,20 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) i += copy(dAtA[i:], m.TerminationMessagePolicy) + if len(m.VolumeDevices) > 0 { + for _, msg := range m.VolumeDevices { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -3203,6 +3291,46 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n48, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + if m.Series != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n49, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + if m.Related != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n50, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) return i, nil } @@ -3224,11 +3352,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n48, err := m.ListMeta.MarshalTo(dAtA[i:]) + n51, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n51 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3244,6 +3372,39 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n52, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + func (m *EventSource) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3395,11 +3556,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n49, err := m.SecretRef.MarshalTo(dAtA[i:]) + n53, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n53 } dAtA[i] = 0x20 i++ @@ -3583,11 +3744,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n50, err := m.Port.MarshalTo(dAtA[i:]) + n54, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n54 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -3656,31 +3817,31 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n51, err := m.Exec.MarshalTo(dAtA[i:]) + n55, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n55 } if m.HTTPGet != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n52, err := m.HTTPGet.MarshalTo(dAtA[i:]) + n56, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n56 } if m.TCPSocket != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n53, err := m.TCPSocket.MarshalTo(dAtA[i:]) + n57, err := m.TCPSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n57 } return i, nil } @@ -3750,6 +3911,98 @@ func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } + return i, nil +} + func (m *ISCSIVolumeSource) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3819,11 +4072,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n54, err := m.SecretRef.MarshalTo(dAtA[i:]) + n59, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n59 } dAtA[i] = 0x58 i++ @@ -3892,21 +4145,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n55, err := m.PostStart.MarshalTo(dAtA[i:]) + n60, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n60 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n56, err := m.PreStop.MarshalTo(dAtA[i:]) + n61, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n61 } return i, nil } @@ -3929,19 +4182,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n57, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n62 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n58, err := m.Spec.MarshalTo(dAtA[i:]) + n63, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n63 return i, nil } @@ -3988,11 +4241,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n59, err := (&v).MarshalTo(dAtA[i:]) + n64, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n64 } } if len(m.Min) > 0 { @@ -4019,11 +4272,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n60, err := (&v).MarshalTo(dAtA[i:]) + n65, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n65 } } if len(m.Default) > 0 { @@ -4050,11 +4303,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n61, err := (&v).MarshalTo(dAtA[i:]) + n66, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n66 } } if len(m.DefaultRequest) > 0 { @@ -4081,11 +4334,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n62, err := (&v).MarshalTo(dAtA[i:]) + n67, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n67 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -4112,11 +4365,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n63, err := (&v).MarshalTo(dAtA[i:]) + n68, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n68 } } return i, nil @@ -4140,11 +4393,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n64, err := m.ListMeta.MarshalTo(dAtA[i:]) + n69, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n69 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4208,11 +4461,11 @@ func (m *List) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n65, err := m.ListMeta.MarshalTo(dAtA[i:]) + n70, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n70 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4431,27 +4684,27 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n66, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n71, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n71 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n67, err := m.Spec.MarshalTo(dAtA[i:]) + n72, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n72 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n68, err := m.Status.MarshalTo(dAtA[i:]) + n73, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n73 return i, nil } @@ -4473,11 +4726,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n69, err := m.ListMeta.MarshalTo(dAtA[i:]) + n74, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4566,27 +4819,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n70, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n75, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n75 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n71, err := m.Spec.MarshalTo(dAtA[i:]) + n76, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n76 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n72, err := m.Status.MarshalTo(dAtA[i:]) + n77, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n77 return i, nil } @@ -4635,11 +4888,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n73, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n78, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n78 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -4682,19 +4935,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n74, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + n79, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n79 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n75, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n80, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n80 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -4725,11 +4978,11 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa 
i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n76, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n81, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n81 } return i, nil } @@ -4752,11 +5005,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n77, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n82, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n82 return i, nil } @@ -4778,11 +5031,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n78, err := m.ListMeta.MarshalTo(dAtA[i:]) + n83, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n83 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4859,11 +5112,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n79, err := (&v).MarshalTo(dAtA[i:]) + n84, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n84 } } return i, nil @@ -5021,11 +5274,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n80, err := m.ConfigSource.MarshalTo(dAtA[i:]) + n85, err := m.ConfigSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n85 } return i, nil } @@ -5069,11 +5322,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n81, err := (&v).MarshalTo(dAtA[i:]) + n86, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n86 } } if len(m.Allocatable) > 0 { @@ -5100,11 +5353,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n82, err := (&v).MarshalTo(dAtA[i:]) + n87, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n87 } } dAtA[i] = 0x1a @@ -5138,19 +5391,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n83, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + n88, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n88 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n84, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n89, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n84 + i += n89 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x42 @@ -5322,20 +5575,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n85, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n90, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n90 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n86, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n91, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += n91 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -5423,11 
+5676,11 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n87, err := m.Initializers.MarshalTo(dAtA[i:]) + n92, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n92 } return i, nil } @@ -5496,27 +5749,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n88, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n93, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n88 + i += n93 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n89, err := m.Spec.MarshalTo(dAtA[i:]) + n94, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n89 + i += n94 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n90, err := m.Status.MarshalTo(dAtA[i:]) + n95, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n95 return i, nil } @@ -5538,27 +5791,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n91, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n96, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n96 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n92, err := m.Spec.MarshalTo(dAtA[i:]) + n97, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n97 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n93, err := m.Status.MarshalTo(dAtA[i:]) + n98, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n93 + i += n98 return i, nil } @@ -5588,19 +5841,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n94, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n99, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n94 + i += n99 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n95, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n100, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n95 + i += n100 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -5630,11 +5883,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n96, err := m.ListMeta.MarshalTo(dAtA[i:]) + n101, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n96 + i += n101 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5683,11 +5936,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n97, err := m.Resources.MarshalTo(dAtA[i:]) + n102, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n97 + i += n102 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) @@ -5696,11 +5949,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n98, err := 
m.Selector.MarshalTo(dAtA[i:]) + n103, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n98 + i += n103 } if m.StorageClassName != nil { dAtA[i] = 0x2a @@ -5708,6 +5961,12 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) i += copy(dAtA[i:], *m.StorageClassName) } + if m.VolumeMode != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } return i, nil } @@ -5769,11 +6028,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n99, err := (&v).MarshalTo(dAtA[i:]) + n104, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n99 + i += n104 } } if len(m.Conditions) > 0 { @@ -5839,11 +6098,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n100, err := m.ListMeta.MarshalTo(dAtA[i:]) + n105, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n100 + i += n105 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5878,163 +6137,163 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n101, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n101 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n102, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n102 - } - if m.HostPath != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n103, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n103 - } - if m.Glusterfs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n104, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n104 - } - if m.NFS != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n105, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n105 - } - if m.RBD != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n106, err := m.RBD.MarshalTo(dAtA[i:]) + n106, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n106 } - if m.ISCSI != nil { - dAtA[i] = 0x3a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n107, err := m.ISCSI.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n107, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n107 } - if m.Cinder != nil { - dAtA[i] = 0x42 + if m.HostPath != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n108, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n108, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n108 } - if m.CephFS != nil { - dAtA[i] = 0x4a + if m.Glusterfs != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.CephFS.Size())) - n109, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n109, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n109 } - if m.FC != nil { - dAtA[i] = 0x52 + if m.NFS != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n110, err := m.FC.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n110, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n110 } - if m.Flocker != nil { - dAtA[i] = 0x5a + if m.RBD != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n111, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n111, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n111 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.ISCSI != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n112, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n112, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n112 } - if m.AzureFile != nil { - dAtA[i] = 0x6a + if m.Cinder != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n113, err := m.AzureFile.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n113, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n113 } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 + if m.CephFS != nil { + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n114, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n114, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n114 } - if m.Quobyte != nil { - dAtA[i] = 0x7a + if m.FC != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n115, err := m.Quobyte.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n115, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n115 } + if m.Flocker != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n116, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n117, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n117 + } + if m.AzureFile != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n118, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n118 + } + if m.VsphereVolume != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n119, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n119 + } + if m.Quobyte != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n120, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + } if m.AzureDisk != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n116, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n121, err := 
m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n116 + i += n121 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -6042,11 +6301,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n117, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n122, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n117 + i += n122 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -6054,11 +6313,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n118, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n123, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n118 + i += n123 } if m.ScaleIO != nil { dAtA[i] = 0x9a @@ -6066,11 +6325,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n119, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n124, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n119 + i += n124 } if m.Local != nil { dAtA[i] = 0xa2 @@ -6078,11 +6337,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n120, err := m.Local.MarshalTo(dAtA[i:]) + n125, err := m.Local.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n120 + i += n125 } if m.StorageOS != nil { dAtA[i] = 0xaa @@ -6090,11 +6349,23 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n121, err := m.StorageOS.MarshalTo(dAtA[i:]) + n126, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n126 + } + if m.CSI != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) + n127, err := m.CSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 } return i, nil } @@ -6138,21 +6409,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n122, err := (&v).MarshalTo(dAtA[i:]) + n128, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n128 } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n123, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n129, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n129 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { dAtA[i] = 0x1a @@ -6172,11 +6443,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n124, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n130, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n130 } dAtA[i] = 0x2a i++ @@ -6201,6 +6472,12 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.VolumeMode != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } return i, nil } @@ -6278,27 +6555,27 @@ func (m *Pod) MarshalTo(dAtA []byte) 
(int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n125, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n131, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n131 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n126, err := m.Spec.MarshalTo(dAtA[i:]) + n132, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n132 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n127, err := m.Status.MarshalTo(dAtA[i:]) + n133, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n133 return i, nil } @@ -6363,11 +6640,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n128, err := m.LabelSelector.MarshalTo(dAtA[i:]) + n134, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n134 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -6513,19 +6790,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n129, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n135, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n135 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n130, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n136, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n136 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -6537,6 +6814,94 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil +} + func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { 
size := m.Size() dAtA = make([]byte, size) @@ -6624,11 +6989,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n131, err := m.ListMeta.MarshalTo(dAtA[i:]) + n137, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n131 + i += n137 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6688,11 +7053,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n132, err := m.SinceTime.MarshalTo(dAtA[i:]) + n138, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n138 } dAtA[i] = 0x30 i++ @@ -6781,11 +7146,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n133, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n139, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 + i += n139 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -6836,11 +7201,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n134, err := m.PodController.MarshalTo(dAtA[i:]) + n140, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n140 } return i, nil } @@ -6964,11 +7329,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n135, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n141, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 + i += n141 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -7000,11 +7365,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n136, err := m.Affinity.MarshalTo(dAtA[i:]) + n142, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 + i += n142 } dAtA[i] = 0x9a i++ @@ -7079,6 +7444,18 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) } + if m.DNSConfig != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) + n143, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + } return i, nil } @@ -7133,11 +7510,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n137, err := m.StartTime.MarshalTo(dAtA[i:]) + n144, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n144 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -7188,19 +7565,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n138, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n145, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n145 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n139, err := m.Status.MarshalTo(dAtA[i:]) + n146, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n139 + i += n146 return 
i, nil } @@ -7222,19 +7599,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n147, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n147 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n141, err := m.Template.MarshalTo(dAtA[i:]) + n148, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n148 return i, nil } @@ -7256,11 +7633,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n142, err := m.ListMeta.MarshalTo(dAtA[i:]) + n149, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n142 + i += n149 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7294,19 +7671,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n143, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n150, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n150 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n144, err := m.Spec.MarshalTo(dAtA[i:]) + n151, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n151 return i, nil } @@ -7386,19 +7763,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n145, err := m.PodSignature.MarshalTo(dAtA[i:]) + n152, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n145 + i += n152 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n146, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n153, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n146 + i += n153 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7431,11 +7808,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n147, err := m.Preference.MarshalTo(dAtA[i:]) + n154, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n147 + i += n154 return i, nil } @@ -7457,11 +7834,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n148, err := m.Handler.MarshalTo(dAtA[i:]) + n155, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n148 + i += n155 dAtA[i] = 0x10 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) @@ -7611,11 +7988,11 @@ func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n149, err := m.SecretRef.MarshalTo(dAtA[i:]) + n156, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n156 } dAtA[i] = 0x40 i++ @@ -7682,11 +8059,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n150, err := m.SecretRef.MarshalTo(dAtA[i:]) + n157, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n150 
+ i += n157 } dAtA[i] = 0x40 i++ @@ -7717,11 +8094,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n151, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n151 + i += n158 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) @@ -7753,27 +8130,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n152, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n152 + i += n159 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n153, err := m.Spec.MarshalTo(dAtA[i:]) + n160, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n153 + i += n160 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n154, err := m.Status.MarshalTo(dAtA[i:]) + n161, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n161 return i, nil } @@ -7803,11 +8180,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n155, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n162, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n162 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7837,11 +8214,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n156, err := m.ListMeta.MarshalTo(dAtA[i:]) + n163, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n163 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7903,11 +8280,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n157, err := m.Template.MarshalTo(dAtA[i:]) + n164, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n164 } dAtA[i] = 0x20 i++ @@ -7986,11 +8363,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n158, err := m.Divisor.MarshalTo(dAtA[i:]) + n165, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n165 return i, nil } @@ -8012,27 +8389,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n159 + i += n166 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n160, err := m.Spec.MarshalTo(dAtA[i:]) + n167, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n160 + i += n167 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n161, err := m.Status.MarshalTo(dAtA[i:]) + n168, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n161 + i += n168 return i, nil } @@ -8054,11 +8431,11 @@ func (m 
*ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n162, err := m.ListMeta.MarshalTo(dAtA[i:]) + n169, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n169 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8113,11 +8490,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n163, err := (&v).MarshalTo(dAtA[i:]) + n170, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n170 } } if len(m.Scopes) > 0 { @@ -8177,11 +8554,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n164, err := (&v).MarshalTo(dAtA[i:]) + n171, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n171 } } if len(m.Used) > 0 { @@ -8208,11 +8585,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n165, err := (&v).MarshalTo(dAtA[i:]) + n172, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n172 } } return i, nil @@ -8257,11 +8634,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n166, err := (&v).MarshalTo(dAtA[i:]) + n173, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n166 + i += n173 } } if len(m.Requests) > 0 { @@ -8288,11 +8665,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n167, err := (&v).MarshalTo(dAtA[i:]) + n174, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n167 + i += n174 } } return i, nil @@ -8359,11 +8736,11 @@ func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n168, err := m.SecretRef.MarshalTo(dAtA[i:]) + n175, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n168 + i += n175 } dAtA[i] = 0x20 i++ @@ -8431,11 +8808,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n169, err := m.SecretRef.MarshalTo(dAtA[i:]) + n176, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n176 } dAtA[i] = 0x20 i++ @@ -8494,11 +8871,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n170, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n177, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n177 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -8574,11 +8951,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n171, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n178, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n178 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -8610,11 +8987,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA 
[]byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n172, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n179, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n179 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -8650,11 +9027,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n173, err := m.ListMeta.MarshalTo(dAtA[i:]) + n180, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n180 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8688,11 +9065,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n174, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n181, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n181 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8812,11 +9189,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n175, err := m.Capabilities.MarshalTo(dAtA[i:]) + n182, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n175 + i += n182 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -8832,11 +9209,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n176, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n183, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n183 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -8894,11 +9271,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n177, err := m.Reference.MarshalTo(dAtA[i:]) + n184, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n184 return i, nil } @@ -8920,27 +9297,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n178, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n185, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n185 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n179, err := m.Spec.MarshalTo(dAtA[i:]) + n186, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n186 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n180, err := m.Status.MarshalTo(dAtA[i:]) + n187, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n187 return i, nil } @@ -8962,11 +9339,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n181, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n188, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n188 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { dAtA[i] = 0x12 @@ -9022,11 +9399,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.ListMeta.Size())) - n182, err := m.ListMeta.MarshalTo(dAtA[i:]) + n189, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n189 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9060,11 +9437,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n183, err := m.ListMeta.MarshalTo(dAtA[i:]) + n190, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n190 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9109,11 +9486,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n184, err := m.TargetPort.MarshalTo(dAtA[i:]) + n191, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n191 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) @@ -9260,11 +9637,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n185, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + n192, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n185 + i += n192 } return i, nil } @@ -9287,11 +9664,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n186, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n193, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n186 + i += n193 return i, nil } @@ -9314,11 +9691,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n187, err := m.ClientIP.MarshalTo(dAtA[i:]) + n194, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n187 + i += n194 } return i, nil } @@ -9362,11 +9739,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n188, err := m.SecretRef.MarshalTo(dAtA[i:]) + n195, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n195 } return i, nil } @@ -9410,11 +9787,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n189, err := m.SecretRef.MarshalTo(dAtA[i:]) + n196, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n189 + i += n196 } return i, nil } @@ -9463,11 +9840,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n190, err := m.Port.MarshalTo(dAtA[i:]) + n197, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n197 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -9506,11 +9883,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n191, err := m.TimeAdded.MarshalTo(dAtA[i:]) + n198, err := m.TimeAdded.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n198 } return i, nil } @@ -9576,11 +9953,37 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { 
dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n192, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n199, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n192 + i += n199 + return i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) return i, nil } @@ -9647,31 +10050,31 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n193, err := m.Secret.MarshalTo(dAtA[i:]) + n200, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n200 } if m.DownwardAPI != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n194, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n201, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n201 } if m.ConfigMap != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n195, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n202, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n202 } return i, nil } @@ -9695,163 +10098,163 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n196, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n196 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n197, err := m.EmptyDir.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n197 - } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n198, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n198 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n199, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n199 - } - if m.GitRepo != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n200, err := m.GitRepo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n200 - } - if m.Secret != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n201, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n201 - } - if m.NFS != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n202, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n202 - } - if m.ISCSI != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n203, err := m.ISCSI.MarshalTo(dAtA[i:]) + n203, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n203 } - if m.Glusterfs != nil { - dAtA[i] = 0x4a + if m.EmptyDir != 
nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n204, err := m.Glusterfs.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n204, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n204 } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n205, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n205, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n205 } - if m.RBD != nil { - dAtA[i] = 0x5a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n206, err := m.RBD.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n206, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n206 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.GitRepo != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n207, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n207, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n207 } - if m.Cinder != nil { - dAtA[i] = 0x6a + if m.Secret != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n208, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n208, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n208 } - if m.CephFS != nil { - dAtA[i] = 0x72 + if m.NFS != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n209, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n209, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n209 } - if m.Flocker != nil { - dAtA[i] = 0x7a + if m.ISCSI != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n210, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n210, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n210 } + if m.Glusterfs != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n211, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n211 + } + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n212, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n212 + } + if m.RBD != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n213, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n213 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n214, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n214 + } + if m.Cinder != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n215, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n215 + } + if m.CephFS != 
nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n216, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n216 + } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n217, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n217 + } if m.DownwardAPI != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n218, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n211 + i += n218 } if m.FC != nil { dAtA[i] = 0x8a @@ -9859,11 +10262,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n212, err := m.FC.MarshalTo(dAtA[i:]) + n219, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n212 + i += n219 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -9871,11 +10274,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n213, err := m.AzureFile.MarshalTo(dAtA[i:]) + n220, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n213 + i += n220 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -9883,11 +10286,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n214, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n221, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n214 + i += n221 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -9895,11 +10298,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n215, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n222, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n215 + i += n222 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -9907,11 +10310,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n216, err := m.Quobyte.MarshalTo(dAtA[i:]) + n223, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n216 + i += n223 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -9919,11 +10322,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n217, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n224, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n217 + i += n224 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -9931,11 +10334,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n218, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n225, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n218 + i += n225 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -9943,11 +10346,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n219, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n226, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 
0, err } - i += n219 + i += n226 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -9955,11 +10358,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n220, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n227, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n220 + i += n227 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -9967,11 +10370,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n221, err := m.Projected.MarshalTo(dAtA[i:]) + n228, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n221 + i += n228 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -9979,11 +10382,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n222, err := m.StorageOS.MarshalTo(dAtA[i:]) + n229, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n222 + i += n229 } return i, nil } @@ -10043,11 +10446,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n223, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n230, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n223 + i += n230 return i, nil } @@ -10191,6 +10594,17 @@ func (m *Binding) Size() (n int) { return n } +func (m *CSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *Capabilities) Size() (n int) { var l int _ = l @@ -10480,6 +10894,12 @@ func (m *Container) Size() (n int) { } l = len(m.TerminationMessagePolicy) n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10822,6 +11242,22 @@ func (m *Event) Size() (n int) { n += 1 + sovGenerated(uint64(m.Count)) l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -10839,6 +11275,17 @@ func (m *EventList) Size() (n int) { return n } +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *EventSource) Size() (n int) { var l int _ = l @@ -11027,6 +11474,38 @@ func (m *HostPathVolumeSource) Size() (n int) { return n } +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if 
len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ISCSIVolumeSource) Size() (n int) { var l int _ = l @@ -11730,6 +12209,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11872,6 +12355,10 @@ func (m *PersistentVolumeSource) Size() (n int) { l = m.StorageOS.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -11909,6 +12396,10 @@ func (m *PersistentVolumeSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12030,6 +12521,42 @@ func (m *PodCondition) Size() (n int) { return n } +func (m *PodDNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PodExecOptions) Size() (n int) { var l int _ = l @@ -12227,6 +12754,10 @@ func (m *PodSpec) Size() (n int) { if m.Priority != nil { n += 2 + sovGenerated(uint64(*m.Priority)) } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -13125,6 +13656,16 @@ func (m *Volume) Size() (n int) { return n } +func (m *VolumeDevice) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *VolumeMount) Size() (n int) { var l int _ = l @@ -13407,6 +13948,18 @@ func (this *Binding) String() string { }, "") return s } +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *Capabilities) String() string { if this == nil { return "nil" @@ -13610,6 +14163,7 @@ func (this *Container) String() string { `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13883,6 +14437,12 @@ func (this *Event) String() string { `LastTimestamp:` + 
strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `}`, }, "") return s @@ -13898,6 +14458,18 @@ func (this *EventList) String() string { }, "") return s } +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} func (this *EventSource) String() string { if this == nil { return "nil" @@ -14064,6 +14636,26 @@ func (this *HostPathVolumeSource) String() string { }, "") return s } +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} func (this *ISCSIVolumeSource) String() string { if this == nil { return "nil" @@ -14673,6 +15265,7 @@ func (this *PersistentVolumeClaimSpec) String() string { `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -14733,7 +15326,7 @@ func (this *PersistentVolumeSource) String() string { `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), 
"ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, @@ -14748,6 +15341,7 @@ func (this *PersistentVolumeSource) String() string { `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, `}`, }, "") return s @@ -14774,6 +15368,7 @@ func (this *PersistentVolumeSpec) String() string { `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -14876,6 +15471,29 @@ func (this *PodCondition) String() string { }, "") return s } +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} func (this *PodExecOptions) String() string { if this == nil { return "nil" @@ -15003,6 +15621,7 @@ func (this *PodSpec) String() string { `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, `}`, }, "") return s @@ -15786,6 +16405,17 @@ func (this *Volume) String() string { }, "") return s } +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} func (this *VolumeMount) String() string { if this == nil { return "nil" @@ -16980,6 +17610,134 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -19541,6 +20299,37 @@ func (m *Container) Unmarshal(dAtA []byte) error { } m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22770,6 +23559,189 @@ func (m *Event) Unmarshal(dAtA []byte) error { 
} m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22902,6 +23874,134 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } return nil } +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *EventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -24735,6 +25835,343 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ISCSIVolumeSource) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -31838,6 +33275,36 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.StorageClassName = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32583,7 +34050,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} + m.ISCSI = &ISCSIPersistentVolumeSource{} } if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -33051,6 +34518,39 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIPersistentVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33401,6 +34901,36 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34557,6 +36087,254 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, PodDNSConfigOption{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodExecOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -36367,6 +38145,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } } m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -45543,6 +47354,114 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeMount) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -47195,749 +49114,777 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 11903 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x70, 0x24, 0xc7, - 0x79, 0x98, 0x66, 0x17, 0xaf, 0xfd, 0xf0, 0xee, 0xc3, 0x91, 0x7b, 0x20, 0xef, 0x70, 0x1c, 0x4a, - 0xe4, 0xf1, 0x05, 0x88, 0x47, 0x52, 0xa4, 0x45, 0x8a, 0x36, 0x80, 0x05, 0xee, 0xc0, 0x3b, 0xdc, - 0x2d, 0x7b, 0x71, 0x77, 0x22, 0x4d, 0x33, 0x1a, 0xec, 0x36, 0x16, 0x43, 0x0c, 0x66, 0x96, 0x33, - 0xb3, 0xb8, 0x03, 0xcb, 0xaa, 0x4a, 0x14, 0x59, 0x79, 0xc8, 0x3f, 0x54, 0x89, 0x2a, 0x71, 0x2c, - 0x95, 0x53, 0x95, 0x47, 0xd9, 0x8a, 0x93, 0x54, 0x1c, 0x39, 0xb2, 0x23, 0x39, 0x15, 0xc7, 0x79, - 0x94, 0xfc, 0x47, 0xb1, 0xf3, 0x47, 0xaa, 0x72, 0x05, 0xb1, 0x4e, 0xa9, 0xa4, 0xf2, 0x23, 0xa9, - 0x24, 0xfe, 0x65, 0xc4, 0x89, 0x52, 0xfd, 0x9c, 0xee, 0xd9, 0x99, 0xdd, 0xc5, 0x11, 0x07, 0x52, - 0x2a, 0xfd, 0xdb, 0xed, 0xef, 0xeb, 0xaf, 0x7b, 0xfa, 0xf1, 0xf5, 0xd7, 0x5f, 0x7f, 0x0f, 0x78, - 0x79, 0xe7, 0xa5, 0x68, 0xde, 0x0d, 0x16, 0x76, 0xda, 0x9b, 0x24, 0xf4, 0x49, 0x4c, 0xa2, 0x85, - 0x3d, 0xe2, 0x37, 0x82, 0x70, 0x41, 0x00, 0x9c, 0x96, 0xbb, 0x50, 0x0f, 0x42, 0xb2, 0xb0, 0xf7, - 0xec, 0x42, 0x93, 0xf8, 0x24, 0x74, 0x62, 0xd2, 0x98, 0x6f, 0x85, 0x41, 0x1c, 0x20, 0xc4, 0x71, - 0xe6, 0x9d, 0x96, 0x3b, 0x4f, 0x71, 0xe6, 0xf7, 0x9e, 0x9d, 0x7d, 0xa6, 0xe9, 0xc6, 0xdb, 0xed, - 0xcd, 0xf9, 0x7a, 0xb0, 0xbb, 0xd0, 0x0c, 0x9a, 0xc1, 0x02, 0x43, 0xdd, 0x6c, 0x6f, 0xb1, 0x7f, - 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xae, 0x27, 0xcd, 0x90, 0x3b, 0x31, 0xf1, 0x23, 0x37, 0xf0, - 0xa3, 0x67, 0x9c, 0x96, 0x1b, 0x91, 0x70, 0x8f, 0x84, 0x0b, 0xad, 0x9d, 0x26, 0x85, 0x45, 0x26, - 0xc2, 0xc2, 0xde, 0xb3, 0x9b, 0x24, 0x76, 0x3a, 0x7a, 0x34, 0xfb, 0x7c, 0x42, 0x6e, 0xd7, 0xa9, - 0x6f, 0xbb, 0x3e, 0x09, 0xf7, 0x25, 0x8d, 0x85, 0x90, 0x44, 0x41, 0x3b, 0xac, 0x93, 0x23, 0xd5, - 0x8a, 0x16, 0x76, 0x49, 0xec, 0x64, 0x7c, 0xfd, 0xec, 0x42, 0x5e, 0xad, 0xb0, 0xed, 0xc7, 0xee, - 0x6e, 0x67, 0x33, 0x9f, 0xe8, 0x55, 0x21, 0xaa, 0x6f, 0x93, 0x5d, 0xa7, 0xa3, 0xde, 0x73, 0x79, - 0xf5, 0xda, 0xb1, 0xeb, 0x2d, 0xb8, 0x7e, 0x1c, 0xc5, 0x61, 0xba, 0x92, 0xfd, 0x5d, 0x0b, 0xce, - 0x2f, 0xde, 0xaa, 0xad, 0x78, 0x4e, 0x14, 0xbb, 0xf5, 0x25, 0x2f, 0xa8, 0xef, 0xd4, 0xe2, 0x20, - 0x24, 0x37, 0x03, 0xaf, 0xbd, 0x4b, 0x6a, 0x6c, 0x20, 0xd0, 0xd3, 0x30, 0xb2, 0xc7, 0xfe, 
0xaf, - 0x55, 0xca, 0xd6, 0x79, 0xeb, 0x42, 0x69, 0x69, 0xea, 0xdb, 0x07, 0x73, 0x1f, 0xb9, 0x7b, 0x30, - 0x37, 0x72, 0x53, 0x94, 0x63, 0x85, 0x81, 0x1e, 0x83, 0xa1, 0xad, 0x68, 0x63, 0xbf, 0x45, 0xca, - 0x05, 0x86, 0x3b, 0x21, 0x70, 0x87, 0x56, 0x6b, 0xb4, 0x14, 0x0b, 0x28, 0x5a, 0x80, 0x52, 0xcb, - 0x09, 0x63, 0x37, 0x76, 0x03, 0xbf, 0x5c, 0x3c, 0x6f, 0x5d, 0x18, 0x5c, 0x9a, 0x16, 0xa8, 0xa5, - 0xaa, 0x04, 0xe0, 0x04, 0x87, 0x76, 0x23, 0x24, 0x4e, 0xe3, 0xba, 0xef, 0xed, 0x97, 0x07, 0xce, - 0x5b, 0x17, 0x46, 0x92, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0xcb, 0x05, 0x18, 0x59, 0xdc, - 0xda, 0x72, 0x7d, 0x37, 0xde, 0x47, 0x37, 0x61, 0xcc, 0x0f, 0x1a, 0x44, 0xfe, 0x67, 0x5f, 0x31, - 0x7a, 0xf1, 0xfc, 0x7c, 0xe7, 0xca, 0x9c, 0xbf, 0xa6, 0xe1, 0x2d, 0x4d, 0xdd, 0x3d, 0x98, 0x1b, - 0xd3, 0x4b, 0xb0, 0x41, 0x07, 0x61, 0x18, 0x6d, 0x05, 0x0d, 0x45, 0xb6, 0xc0, 0xc8, 0xce, 0x65, - 0x91, 0xad, 0x26, 0x68, 0x4b, 0x93, 0x77, 0x0f, 0xe6, 0x46, 0xb5, 0x02, 0xac, 0x13, 0x41, 0x9b, - 0x30, 0x49, 0xff, 0xfa, 0xb1, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0x47, 0xf3, 0xe8, 0x6a, 0xa8, 0x4b, - 0xa7, 0xee, 0x1e, 0xcc, 0x4d, 0xa6, 0x0a, 0x71, 0x9a, 0xa0, 0xfd, 0x1e, 0x4c, 0x2c, 0xc6, 0xb1, - 0x53, 0xdf, 0x26, 0x0d, 0x3e, 0x83, 0xe8, 0x79, 0x18, 0xf0, 0x9d, 0x5d, 0x22, 0xe6, 0xf7, 0xbc, - 0x18, 0xd8, 0x81, 0x6b, 0xce, 0x2e, 0x39, 0x3c, 0x98, 0x9b, 0xba, 0xe1, 0xbb, 0xef, 0xb6, 0xc5, - 0xaa, 0xa0, 0x65, 0x98, 0x61, 0xa3, 0x8b, 0x00, 0x0d, 0xb2, 0xe7, 0xd6, 0x49, 0xd5, 0x89, 0xb7, - 0xc5, 0x7c, 0x23, 0x51, 0x17, 0x2a, 0x0a, 0x82, 0x35, 0x2c, 0xfb, 0x0e, 0x94, 0x16, 0xf7, 0x02, - 0xb7, 0x51, 0x0d, 0x1a, 0x11, 0xda, 0x81, 0xc9, 0x56, 0x48, 0xb6, 0x48, 0xa8, 0x8a, 0xca, 0xd6, - 0xf9, 0xe2, 0x85, 0xd1, 0x8b, 0x17, 0x32, 0x3f, 0xd6, 0x44, 0x5d, 0xf1, 0xe3, 0x70, 0x7f, 0xe9, - 0x41, 0xd1, 0xde, 0x64, 0x0a, 0x8a, 0xd3, 0x94, 0xed, 0x7f, 0x53, 0x80, 0xd3, 0x8b, 0xef, 0xb5, - 0x43, 0x52, 0x71, 0xa3, 0x9d, 0xf4, 0x0a, 0x6f, 0xb8, 0xd1, 0xce, 0xb5, 0x64, 0x04, 0xd4, 0xd2, - 0xaa, 0x88, 0x72, 0xac, 0x30, 0xd0, 0x33, 0x30, 0x4c, 0x7f, 0xdf, 0xc0, 0x6b, 0xe2, 0x93, 0x4f, - 0x09, 0xe4, 0xd1, 0x8a, 0x13, 0x3b, 0x15, 0x0e, 0xc2, 0x12, 0x07, 0xad, 0xc3, 0x68, 0x9d, 0x6d, - 0xc8, 0xe6, 0x7a, 0xd0, 0x20, 0x6c, 0x32, 0x4b, 0x4b, 0x4f, 0x51, 0xf4, 0xe5, 0xa4, 0xf8, 0xf0, - 0x60, 0xae, 0xcc, 0xfb, 0x26, 0x48, 0x68, 0x30, 0xac, 0xd7, 0x47, 0xb6, 0xda, 0x5f, 0x03, 0x8c, - 0x12, 0x64, 0xec, 0xad, 0x0b, 0xda, 0x56, 0x19, 0x64, 0x5b, 0x65, 0x2c, 0x7b, 0x9b, 0xa0, 0x67, - 0x61, 0x60, 0xc7, 0xf5, 0x1b, 0xe5, 0x21, 0x46, 0xeb, 0x2c, 0x9d, 0xf3, 0x2b, 0xae, 0xdf, 0x38, - 0x3c, 0x98, 0x9b, 0x36, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0xc4, 0x82, 0x39, 0x06, 0x5b, - 0x75, 0x3d, 0x52, 0x25, 0x61, 0xe4, 0x46, 0x31, 0xf1, 0x63, 0x63, 0x40, 0x2f, 0x02, 0x44, 0xa4, - 0x1e, 0x92, 0x58, 0x1b, 0x52, 0xb5, 0x30, 0x6a, 0x0a, 0x82, 0x35, 0x2c, 0xca, 0x10, 0xa2, 0x6d, - 0x27, 0x64, 0xeb, 0x4b, 0x0c, 0xac, 0x62, 0x08, 0x35, 0x09, 0xc0, 0x09, 0x8e, 0xc1, 0x10, 0x8a, - 0xbd, 0x18, 0x02, 0xfa, 0x14, 0x4c, 0x26, 0x8d, 0x45, 0x2d, 0xa7, 0x2e, 0x07, 0x90, 0x6d, 0x99, - 0x9a, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0x1f, 0x58, 0x62, 0xf1, 0xd0, 0xaf, 0xfe, 0x90, 0x7f, 0xab, - 0xfd, 0xdb, 0x16, 0x0c, 0x2f, 0xb9, 0x7e, 0xc3, 0xf5, 0x9b, 0xe8, 0x33, 0x30, 0x42, 0xcf, 0xa6, - 0x86, 0x13, 0x3b, 0x82, 0xef, 0x7d, 0x5c, 0xdb, 0x5b, 0xea, 0xa8, 0x98, 0x6f, 0xed, 0x34, 0x69, - 0x41, 0x34, 0x4f, 0xb1, 0xe9, 0x6e, 0xbb, 0xbe, 0xf9, 0x0e, 0xa9, 0xc7, 0xeb, 0x24, 0x76, 0x92, - 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0xae, 0xc0, 0x50, 0xec, 0x84, 0x4d, 0x12, 0x0b, 0x06, 0x98, - 0xc9, 0xa8, 0x78, 
0x4d, 0x4c, 0x77, 0x24, 0xf1, 0xeb, 0x24, 0x39, 0x16, 0x36, 0x58, 0x55, 0x2c, - 0x48, 0xd8, 0x75, 0x18, 0x5b, 0x76, 0x5a, 0xce, 0xa6, 0xeb, 0xb9, 0xb1, 0x4b, 0x22, 0xf4, 0x38, - 0x14, 0x9d, 0x46, 0x83, 0x71, 0x85, 0xd2, 0xd2, 0xe9, 0xbb, 0x07, 0x73, 0xc5, 0xc5, 0x06, 0x5d, - 0x9e, 0xa0, 0xb0, 0xf6, 0x31, 0xc5, 0x40, 0x4f, 0xc2, 0x40, 0x23, 0x0c, 0x5a, 0xe5, 0x02, 0xc3, - 0x7c, 0x80, 0xae, 0xe4, 0x4a, 0x18, 0xb4, 0x52, 0xa8, 0x0c, 0xc7, 0xfe, 0xdd, 0x02, 0x3c, 0xbc, - 0x4c, 0x5a, 0xdb, 0xab, 0xb5, 0x9c, 0xf5, 0x7b, 0x01, 0x46, 0x76, 0x03, 0xdf, 0x8d, 0x83, 0x30, - 0x12, 0x4d, 0xb3, 0x0d, 0xb4, 0x2e, 0xca, 0xb0, 0x82, 0xa2, 0xf3, 0x30, 0xd0, 0x4a, 0x98, 0xdf, - 0x98, 0x64, 0x9c, 0x8c, 0xed, 0x31, 0x08, 0xc5, 0x68, 0x47, 0x24, 0x14, 0x1b, 0x5f, 0x61, 0xdc, - 0x88, 0x48, 0x88, 0x19, 0x24, 0x59, 0x41, 0x74, 0x6d, 0x89, 0x55, 0x99, 0x5a, 0x41, 0x14, 0x82, - 0x35, 0x2c, 0x54, 0x85, 0x12, 0xff, 0x87, 0xc9, 0x16, 0xdb, 0xe3, 0x39, 0xe3, 0x5e, 0x93, 0x48, - 0x62, 0xdc, 0xc7, 0xd9, 0x12, 0x93, 0x85, 0x38, 0x21, 0x62, 0x2c, 0xb1, 0xa1, 0x9e, 0x4b, 0xec, - 0x5b, 0x05, 0x40, 0x7c, 0x08, 0x7f, 0xc4, 0x06, 0xee, 0x46, 0xe7, 0xc0, 0x65, 0x1e, 0x36, 0x57, - 0x83, 0xba, 0xe3, 0xa5, 0x57, 0xed, 0x71, 0x8d, 0xde, 0x2f, 0x59, 0x80, 0x96, 0x5d, 0xbf, 0x41, - 0xc2, 0x13, 0x90, 0xb4, 0x8e, 0xc6, 0x3b, 0xae, 0xc2, 0xc4, 0xb2, 0xe7, 0x12, 0x3f, 0x5e, 0xab, - 0x2e, 0x07, 0xfe, 0x96, 0xdb, 0x44, 0x9f, 0x84, 0x09, 0x2a, 0x78, 0x06, 0xed, 0xb8, 0x46, 0xea, - 0x81, 0xcf, 0xce, 0x68, 0x2a, 0xae, 0xa1, 0xbb, 0x07, 0x73, 0x13, 0x1b, 0x06, 0x04, 0xa7, 0x30, - 0xed, 0x3f, 0xa2, 0x1f, 0x1a, 0xec, 0xb6, 0x02, 0x9f, 0xf8, 0xf1, 0x72, 0xe0, 0x37, 0xb8, 0x2c, - 0xf7, 0x49, 0x18, 0x88, 0x69, 0xc7, 0xf9, 0x47, 0x3e, 0x26, 0xa7, 0x96, 0x76, 0xf7, 0xf0, 0x60, - 0xee, 0x81, 0xce, 0x1a, 0xec, 0x83, 0x58, 0x1d, 0xf4, 0x53, 0x30, 0x14, 0xc5, 0x4e, 0xdc, 0x8e, - 0xc4, 0x67, 0x3f, 0x22, 0x3f, 0xbb, 0xc6, 0x4a, 0x0f, 0x0f, 0xe6, 0x26, 0x55, 0x35, 0x5e, 0x84, - 0x45, 0x05, 0xf4, 0x04, 0x0c, 0xef, 0x92, 0x28, 0x72, 0x9a, 0xf2, 0x18, 0x9e, 0x14, 0x75, 0x87, - 0xd7, 0x79, 0x31, 0x96, 0x70, 0xf4, 0x28, 0x0c, 0x92, 0x30, 0x0c, 0x42, 0xb1, 0xaa, 0xc6, 0x05, - 0xe2, 0xe0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x7b, 0x0b, 0x26, 0x55, 0x5f, 0x79, 0x5b, 0x27, - 0xc0, 0x6f, 0xdf, 0x04, 0xa8, 0xcb, 0x0f, 0x8c, 0x18, 0xbf, 0x1b, 0xbd, 0xf8, 0x58, 0xd6, 0x12, - 0xee, 0x1c, 0xc6, 0x84, 0xb2, 0x2a, 0x8a, 0xb0, 0x46, 0xcd, 0xfe, 0x17, 0x16, 0x9c, 0x4a, 0x7d, - 0xd1, 0x55, 0x37, 0x8a, 0xd1, 0x5b, 0x1d, 0x5f, 0x35, 0xdf, 0xdf, 0x57, 0xd1, 0xda, 0xec, 0x9b, - 0xd4, 0x9a, 0x93, 0x25, 0xda, 0x17, 0x5d, 0x86, 0x41, 0x37, 0x26, 0xbb, 0xf2, 0x63, 0x1e, 0xed, - 0xfa, 0x31, 0xbc, 0x57, 0xc9, 0x8c, 0xac, 0xd1, 0x9a, 0x98, 0x13, 0xb0, 0xff, 0x97, 0x05, 0x25, - 0xbe, 0x6c, 0xd7, 0x9d, 0xd6, 0x09, 0xcc, 0xc5, 0x1a, 0x0c, 0x30, 0xea, 0xbc, 0xe3, 0x8f, 0x67, - 0x77, 0x5c, 0x74, 0x67, 0x9e, 0x0a, 0x53, 0x5c, 0x68, 0x55, 0xcc, 0x8c, 0x16, 0x61, 0x46, 0x62, - 0xf6, 0x45, 0x28, 0x29, 0x04, 0x34, 0x05, 0xc5, 0x1d, 0xc2, 0x2f, 0x2a, 0x25, 0x4c, 0x7f, 0xa2, - 0x19, 0x18, 0xdc, 0x73, 0xbc, 0xb6, 0xd8, 0xec, 0x98, 0xff, 0xf9, 0x64, 0xe1, 0x25, 0xcb, 0xfe, - 0x26, 0xdb, 0x63, 0xa2, 0x91, 0x15, 0x7f, 0x4f, 0x30, 0x93, 0xf7, 0x60, 0xc6, 0xcb, 0xe0, 0x61, - 0x62, 0x20, 0xfa, 0xe7, 0x79, 0x0f, 0x8b, 0xbe, 0xce, 0x64, 0x41, 0x71, 0x66, 0x1b, 0xf4, 0x18, - 0x08, 0x5a, 0x74, 0x45, 0x39, 0x1e, 0xeb, 0xaf, 0x10, 0x40, 0xaf, 0x8b, 0x32, 0xac, 0xa0, 0x94, - 0x41, 0xcc, 0xa8, 0xce, 0x5f, 0x21, 0xfb, 0x35, 0xe2, 0x91, 0x7a, 0x1c, 0x84, 0x1f, 0x68, 0xf7, - 0xcf, 0xf2, 0xd1, 0xe7, 0xfc, 0x65, 0x54, 
0x10, 0x28, 0x5e, 0x21, 0xfb, 0x7c, 0x2a, 0xf4, 0xaf, - 0x2b, 0x76, 0xfd, 0xba, 0xdf, 0xb0, 0x60, 0x5c, 0x7d, 0xdd, 0x09, 0x6c, 0xa4, 0x25, 0x73, 0x23, - 0x9d, 0xed, 0xba, 0x1e, 0x73, 0xb6, 0xd0, 0x0f, 0x19, 0x0b, 0x10, 0x38, 0xd5, 0x30, 0xa0, 0x43, - 0x43, 0x79, 0xf6, 0x07, 0x39, 0x21, 0xfd, 0x7c, 0xd7, 0x15, 0xb2, 0xbf, 0x11, 0x50, 0xf1, 0x21, - 0xfb, 0xbb, 0x8c, 0x59, 0x1b, 0xe8, 0x3a, 0x6b, 0xbf, 0x59, 0x80, 0xd3, 0x6a, 0x04, 0x8c, 0x03, - 0xfa, 0x47, 0x7d, 0x0c, 0x9e, 0x85, 0xd1, 0x06, 0xd9, 0x72, 0xda, 0x5e, 0xac, 0xee, 0xa2, 0x83, - 0x5c, 0x1f, 0x51, 0x49, 0x8a, 0xb1, 0x8e, 0x73, 0x84, 0x61, 0xfb, 0xb7, 0xc0, 0x78, 0x6f, 0xec, - 0xd0, 0x15, 0x4c, 0xa5, 0x37, 0x4d, 0xa3, 0x30, 0xa6, 0x6b, 0x14, 0x84, 0xf6, 0xe0, 0x51, 0x18, - 0x74, 0x77, 0xe9, 0x59, 0x5c, 0x30, 0x8f, 0xd8, 0x35, 0x5a, 0x88, 0x39, 0x0c, 0x7d, 0x0c, 0x86, - 0xeb, 0xc1, 0xee, 0xae, 0xe3, 0x37, 0xca, 0x45, 0x26, 0x4f, 0x8e, 0xd2, 0xe3, 0x7a, 0x99, 0x17, - 0x61, 0x09, 0x43, 0x0f, 0xc3, 0x80, 0x13, 0x36, 0xa3, 0xf2, 0x00, 0xc3, 0x19, 0xa1, 0x2d, 0x2d, - 0x86, 0xcd, 0x08, 0xb3, 0x52, 0x2a, 0x27, 0xde, 0x0e, 0xc2, 0x1d, 0xd7, 0x6f, 0x56, 0xdc, 0x90, - 0x09, 0x7d, 0x9a, 0x9c, 0x78, 0x4b, 0x41, 0xb0, 0x86, 0x85, 0x56, 0x61, 0xb0, 0x15, 0x84, 0x71, - 0x54, 0x1e, 0x62, 0xc3, 0xfd, 0x48, 0xce, 0x56, 0xe2, 0x5f, 0x5b, 0x0d, 0xc2, 0x38, 0xf9, 0x00, - 0xfa, 0x2f, 0xc2, 0xbc, 0x3a, 0xfa, 0x29, 0x28, 0x12, 0x7f, 0xaf, 0x3c, 0xcc, 0xa8, 0xcc, 0x66, - 0x51, 0x59, 0xf1, 0xf7, 0x6e, 0x3a, 0x61, 0xc2, 0x67, 0x56, 0xfc, 0x3d, 0x4c, 0xeb, 0xa0, 0x37, - 0xa0, 0x24, 0xb5, 0x91, 0x51, 0x79, 0x24, 0x7f, 0x89, 0x61, 0x81, 0x84, 0xc9, 0xbb, 0x6d, 0x37, - 0x24, 0xbb, 0xc4, 0x8f, 0xa3, 0xe4, 0x3e, 0x29, 0xa1, 0x11, 0x4e, 0xa8, 0xa1, 0x37, 0x60, 0x8c, - 0xcb, 0x91, 0xeb, 0x41, 0xdb, 0x8f, 0xa3, 0x72, 0x89, 0x75, 0x2f, 0x53, 0x75, 0x75, 0x33, 0xc1, - 0x5b, 0x9a, 0x11, 0x44, 0xc7, 0xb4, 0xc2, 0x08, 0x1b, 0xa4, 0x10, 0x86, 0x71, 0xcf, 0xdd, 0x23, - 0x3e, 0x89, 0xa2, 0x6a, 0x18, 0x6c, 0x92, 0x32, 0xb0, 0x9e, 0x9f, 0xc9, 0xd6, 0xe8, 0x04, 0x9b, - 0x64, 0x69, 0xfa, 0xee, 0xc1, 0xdc, 0xf8, 0x55, 0xbd, 0x0e, 0x36, 0x49, 0xa0, 0x1b, 0x30, 0x41, - 0x05, 0x54, 0x37, 0x21, 0x3a, 0xda, 0x8b, 0x28, 0x93, 0x4e, 0xb1, 0x51, 0x09, 0xa7, 0x88, 0xa0, - 0xd7, 0xa0, 0xe4, 0xb9, 0x5b, 0xa4, 0xbe, 0x5f, 0xf7, 0x48, 0x79, 0x8c, 0x51, 0xcc, 0xdc, 0x56, - 0x57, 0x25, 0x12, 0xbf, 0x00, 0xa8, 0xbf, 0x38, 0xa9, 0x8e, 0x6e, 0xc2, 0x03, 0x31, 0x09, 0x77, - 0x5d, 0xdf, 0xa1, 0xdb, 0x41, 0xc8, 0x93, 0x4c, 0x2f, 0x36, 0xce, 0xd6, 0xdb, 0x39, 0x31, 0x74, - 0x0f, 0x6c, 0x64, 0x62, 0xe1, 0x9c, 0xda, 0xe8, 0x3a, 0x4c, 0xb2, 0x9d, 0x50, 0x6d, 0x7b, 0x5e, - 0x35, 0xf0, 0xdc, 0xfa, 0x7e, 0x79, 0x82, 0x11, 0xfc, 0x98, 0x54, 0x7c, 0xad, 0x99, 0x60, 0x7a, - 0xe3, 0x4d, 0xfe, 0xe1, 0x74, 0x6d, 0xb4, 0xc9, 0x14, 0x21, 0xed, 0xd0, 0x8d, 0xf7, 0xe9, 0xfa, - 0x25, 0x77, 0xe2, 0xf2, 0x64, 0xd7, 0xfb, 0xa3, 0x8e, 0xaa, 0xb4, 0x25, 0x7a, 0x21, 0x4e, 0x13, - 0xa4, 0x5b, 0x3b, 0x8a, 0x1b, 0xae, 0x5f, 0x9e, 0x62, 0x1c, 0x43, 0xed, 0x8c, 0x1a, 0x2d, 0xc4, - 0x1c, 0xc6, 0x94, 0x20, 0xf4, 0xc7, 0x75, 0xca, 0x41, 0xa7, 0x19, 0x62, 0xa2, 0x04, 0x91, 0x00, - 0x9c, 0xe0, 0xd0, 0x63, 0x39, 0x8e, 0xf7, 0xcb, 0x88, 0xa1, 0xaa, 0xed, 0xb2, 0xb1, 0xf1, 0x06, - 0xa6, 0xe5, 0xe8, 0x2a, 0x0c, 0x13, 0x7f, 0x6f, 0x35, 0x0c, 0x76, 0xcb, 0xa7, 0xf2, 0xf7, 0xec, - 0x0a, 0x47, 0xe1, 0x0c, 0x3d, 0xb9, 0x00, 0x88, 0x62, 0x2c, 0x49, 0xa0, 0x3b, 0x50, 0xce, 0x98, - 0x11, 0x3e, 0x01, 0x33, 0x6c, 0x02, 0x5e, 0x11, 0x75, 0xcb, 0x1b, 0x39, 0x78, 0x87, 0x5d, 0x60, - 0x38, 0x97, 0xba, 0xbd, 0x09, 0x13, 0x8a, 0xb1, 0xb0, 0xb9, 0x45, 
0x73, 0x30, 0x48, 0x39, 0xa6, - 0xbc, 0x52, 0x97, 0xe8, 0x50, 0x32, 0xd5, 0x14, 0xe6, 0xe5, 0x6c, 0x28, 0xdd, 0xf7, 0xc8, 0xd2, - 0x7e, 0x4c, 0xf8, 0xb5, 0xa8, 0xa8, 0x0d, 0xa5, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x7f, 0x5c, 0x30, - 0x49, 0xb8, 0x57, 0x1f, 0xfc, 0xfa, 0x69, 0x18, 0xd9, 0x0e, 0xa2, 0x98, 0x62, 0xb3, 0x36, 0x06, - 0x13, 0x51, 0xe4, 0xb2, 0x28, 0xc7, 0x0a, 0x03, 0xbd, 0x0c, 0xe3, 0x75, 0xbd, 0x01, 0x71, 0xd8, - 0x9c, 0x16, 0x55, 0xcc, 0xd6, 0xb1, 0x89, 0x8b, 0x5e, 0x82, 0x11, 0xf6, 0x40, 0x51, 0x0f, 0x3c, - 0x71, 0x01, 0x93, 0x27, 0xe6, 0x48, 0x55, 0x94, 0x1f, 0x6a, 0xbf, 0xb1, 0xc2, 0xa6, 0x97, 0x62, - 0xda, 0x85, 0xb5, 0xaa, 0x60, 0xf3, 0xea, 0x52, 0x7c, 0x99, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x5a, - 0x41, 0x1b, 0x65, 0x7a, 0xa5, 0x20, 0xa8, 0x0a, 0xc3, 0xb7, 0x1d, 0x37, 0x76, 0xfd, 0xa6, 0x38, - 0xcf, 0x9f, 0xe8, 0xca, 0xf3, 0x59, 0xa5, 0x5b, 0xbc, 0x02, 0x3f, 0x95, 0xc4, 0x1f, 0x2c, 0xc9, - 0x50, 0x8a, 0x61, 0xdb, 0xf7, 0x29, 0xc5, 0x42, 0xbf, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, - 0x58, 0x92, 0x41, 0x6f, 0x01, 0xc8, 0x75, 0x43, 0x1a, 0xe2, 0x61, 0xe0, 0xe9, 0xde, 0x44, 0x37, - 0x54, 0x9d, 0xa5, 0x09, 0x7a, 0xe6, 0x25, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x99, 0xdc, 0xd3, 0xd9, - 0x19, 0xf4, 0xb3, 0x74, 0xab, 0x3a, 0x61, 0x4c, 0x1a, 0x8b, 0xb1, 0x18, 0x9c, 0x27, 0xfb, 0x13, - 0x5b, 0x37, 0xdc, 0x5d, 0xa2, 0x6f, 0x6b, 0x41, 0x04, 0x27, 0xf4, 0xec, 0xdf, 0x2a, 0x42, 0x39, - 0xaf, 0xbb, 0x74, 0xd1, 0x91, 0x3b, 0x6e, 0xbc, 0x4c, 0xc5, 0x15, 0xcb, 0x5c, 0x74, 0x2b, 0xa2, - 0x1c, 0x2b, 0x0c, 0x3a, 0xfb, 0x91, 0xdb, 0x94, 0xb7, 0x8e, 0xc1, 0x64, 0xf6, 0x6b, 0xac, 0x14, - 0x0b, 0x28, 0xc5, 0x0b, 0x89, 0x13, 0x89, 0x97, 0x27, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, - 0x57, 0x18, 0x0c, 0xf4, 0x50, 0x18, 0x18, 0x43, 0x34, 0x78, 0xbc, 0x43, 0x84, 0xde, 0x06, 0xd8, - 0x72, 0x7d, 0x37, 0xda, 0x66, 0xd4, 0x87, 0x8e, 0x4c, 0x5d, 0x09, 0x3b, 0xab, 0x8a, 0x0a, 0xd6, - 0x28, 0xa2, 0x17, 0x60, 0x54, 0x6d, 0xc0, 0xb5, 0x4a, 0x79, 0xd8, 0x7c, 0xd6, 0x48, 0xb8, 0x51, - 0x05, 0xeb, 0x78, 0xf6, 0x3b, 0xe9, 0xf5, 0x22, 0x76, 0x80, 0x36, 0xbe, 0x56, 0xbf, 0xe3, 0x5b, - 0xe8, 0x3e, 0xbe, 0xf6, 0xef, 0x15, 0x61, 0xd2, 0x68, 0xac, 0x1d, 0xf5, 0xc1, 0xb3, 0x2e, 0xd1, - 0x83, 0xc8, 0x89, 0x89, 0xd8, 0x7f, 0x76, 0xef, 0xad, 0xa2, 0x1f, 0x56, 0x74, 0x07, 0xf0, 0xfa, - 0xe8, 0x6d, 0x28, 0x79, 0x4e, 0xc4, 0x94, 0x0f, 0x44, 0xec, 0xbb, 0x7e, 0x88, 0x25, 0x82, 0xbe, - 0x13, 0xc5, 0xda, 0x59, 0xc0, 0x69, 0x27, 0x24, 0xe9, 0x89, 0x49, 0x85, 0x13, 0xf9, 0xb4, 0xa9, - 0x3a, 0x41, 0x25, 0x98, 0x7d, 0xcc, 0x61, 0xe8, 0x25, 0x18, 0x0b, 0x09, 0x5b, 0x15, 0xcb, 0x54, - 0xd6, 0x62, 0xcb, 0x6c, 0x30, 0x11, 0xca, 0xb0, 0x06, 0xc3, 0x06, 0x66, 0x22, 0x6b, 0x0f, 0x75, - 0x91, 0xb5, 0x9f, 0x80, 0x61, 0xf6, 0x43, 0xad, 0x00, 0x35, 0x1b, 0x6b, 0xbc, 0x18, 0x4b, 0x78, - 0x7a, 0xc1, 0x8c, 0xf4, 0xb9, 0x60, 0x9e, 0x84, 0x89, 0x8a, 0x43, 0x76, 0x03, 0x7f, 0xc5, 0x6f, - 0xb4, 0x02, 0xd7, 0x8f, 0x51, 0x19, 0x06, 0xd8, 0xe9, 0xc0, 0xf7, 0xf6, 0x00, 0xa5, 0x80, 0x07, - 0xa8, 0xe4, 0x6c, 0xff, 0x61, 0x01, 0xc6, 0x2b, 0xc4, 0x23, 0x31, 0xe1, 0x77, 0x8d, 0x08, 0xad, - 0x02, 0x6a, 0x86, 0x4e, 0x9d, 0x54, 0x49, 0xe8, 0x06, 0x0d, 0x5d, 0x19, 0x59, 0x64, 0x0a, 0x7f, - 0x74, 0xa9, 0x03, 0x8a, 0x33, 0x6a, 0xa0, 0x37, 0x61, 0xbc, 0x15, 0x12, 0x43, 0x87, 0x66, 0xe5, - 0x89, 0x0b, 0x55, 0x1d, 0x91, 0x4b, 0xaa, 0x46, 0x11, 0x36, 0x49, 0xa1, 0x9f, 0x81, 0xa9, 0x20, - 0x6c, 0x6d, 0x3b, 0x7e, 0x85, 0xb4, 0x88, 0xdf, 0xa0, 0xa2, 0xb8, 0xd0, 0x11, 0xcc, 0xdc, 0x3d, - 0x98, 0x9b, 0xba, 0x9e, 0x82, 0xe1, 0x0e, 0x6c, 0xf4, 0x26, 0x4c, 0xb7, 0xc2, 0xa0, 0xe5, 
0x34, - 0xd9, 0x42, 0x11, 0x12, 0x07, 0xe7, 0x3e, 0x4f, 0xdf, 0x3d, 0x98, 0x9b, 0xae, 0xa6, 0x81, 0x87, - 0x07, 0x73, 0xa7, 0xd8, 0x40, 0xd1, 0x92, 0x04, 0x88, 0x3b, 0xc9, 0xd8, 0x4d, 0x38, 0x5d, 0x09, - 0x6e, 0xfb, 0xb7, 0x9d, 0xb0, 0xb1, 0x58, 0x5d, 0xd3, 0x2e, 0xf7, 0xd7, 0xe4, 0xe5, 0x92, 0x3f, - 0xbf, 0x66, 0x9e, 0x53, 0x5a, 0x4d, 0x2e, 0xfe, 0xaf, 0xba, 0x1e, 0xc9, 0x51, 0x22, 0xfc, 0xcd, - 0x82, 0xd1, 0x52, 0x82, 0xaf, 0xf4, 0xfe, 0x56, 0xae, 0xde, 0xff, 0x75, 0x18, 0xd9, 0x72, 0x89, - 0xd7, 0xc0, 0x64, 0x4b, 0xcc, 0xcc, 0xe3, 0xf9, 0x2f, 0x4a, 0xab, 0x14, 0x53, 0x2a, 0x8d, 0xf8, - 0xd5, 0x74, 0x55, 0x54, 0xc6, 0x8a, 0x0c, 0xda, 0x81, 0x29, 0x79, 0xf7, 0x91, 0x50, 0xb1, 0x89, - 0x9f, 0xe8, 0x76, 0xa1, 0x32, 0x89, 0xb3, 0x09, 0xc4, 0x29, 0x32, 0xb8, 0x83, 0x30, 0xbd, 0x8b, - 0xee, 0xd2, 0xe3, 0x6a, 0x80, 0x2d, 0x69, 0x76, 0x17, 0x65, 0xd7, 0x6a, 0x56, 0x6a, 0x7f, 0xd5, - 0x82, 0x07, 0x3b, 0x46, 0x46, 0xa8, 0x17, 0x8e, 0x79, 0x16, 0xd2, 0xd7, 0xfd, 0x42, 0xef, 0xeb, - 0xbe, 0xfd, 0x0f, 0x2d, 0x98, 0x59, 0xd9, 0x6d, 0xc5, 0xfb, 0x15, 0xd7, 0x7c, 0x9b, 0x78, 0x11, - 0x86, 0x76, 0x49, 0xc3, 0x6d, 0xef, 0x8a, 0x99, 0x9b, 0x93, 0x2c, 0x7d, 0x9d, 0x95, 0x1e, 0x1e, - 0xcc, 0x8d, 0xd7, 0xe2, 0x20, 0x74, 0x9a, 0x84, 0x17, 0x60, 0x81, 0xce, 0x0e, 0x46, 0xf7, 0x3d, - 0x72, 0xd5, 0xdd, 0x75, 0xe5, 0x0b, 0x61, 0x57, 0x95, 0xd7, 0xbc, 0x1c, 0xd0, 0xf9, 0xd7, 0xdb, - 0x8e, 0x1f, 0xbb, 0xf1, 0xbe, 0x78, 0x76, 0x91, 0x44, 0x70, 0x42, 0xcf, 0xfe, 0xae, 0x05, 0x93, - 0x92, 0x97, 0x2c, 0x36, 0x1a, 0x21, 0x89, 0x22, 0x34, 0x0b, 0x05, 0xb7, 0x25, 0x7a, 0x09, 0xa2, - 0x97, 0x85, 0xb5, 0x2a, 0x2e, 0xb8, 0x2d, 0x54, 0x85, 0x12, 0x7f, 0x68, 0x4c, 0x16, 0x57, 0x5f, - 0xcf, 0x95, 0xac, 0x07, 0x1b, 0xb2, 0x26, 0x4e, 0x88, 0x48, 0xa9, 0x98, 0x9d, 0x43, 0x45, 0xf3, - 0xcd, 0xe6, 0xb2, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x80, 0x11, 0x3f, 0x68, 0xf0, 0x77, 0x5f, 0xbe, - 0xa7, 0xd9, 0x92, 0xbd, 0x26, 0xca, 0xb0, 0x82, 0xda, 0xbf, 0x68, 0xc1, 0x98, 0xfc, 0xb2, 0x3e, - 0x05, 0x74, 0xba, 0xb5, 0x12, 0xe1, 0x3c, 0xd9, 0x5a, 0x54, 0xc0, 0x66, 0x10, 0x43, 0xae, 0x2e, - 0x1e, 0x45, 0xae, 0xb6, 0xbf, 0x52, 0x80, 0x09, 0xd9, 0x9d, 0x5a, 0x7b, 0x33, 0x22, 0x31, 0xda, - 0x80, 0x92, 0xc3, 0x87, 0x9c, 0xc8, 0x15, 0xfb, 0x68, 0xf6, 0x8d, 0xcb, 0x98, 0x9f, 0x44, 0xd4, - 0x59, 0x94, 0xb5, 0x71, 0x42, 0x08, 0x79, 0x30, 0xed, 0x07, 0x31, 0x3b, 0xf6, 0x14, 0xbc, 0xdb, - 0xbb, 0x40, 0x9a, 0xfa, 0x19, 0x41, 0x7d, 0xfa, 0x5a, 0x9a, 0x0a, 0xee, 0x24, 0x8c, 0x56, 0xa4, - 0x96, 0xa7, 0xc8, 0x5a, 0x38, 0xdf, 0xad, 0x85, 0x7c, 0x25, 0x8f, 0xfd, 0x3b, 0x16, 0x94, 0x24, - 0xda, 0x49, 0x3c, 0x01, 0xad, 0xc3, 0x70, 0xc4, 0x26, 0x41, 0x0e, 0x8d, 0xdd, 0xad, 0xe3, 0x7c, - 0xbe, 0x92, 0xd3, 0x9c, 0xff, 0x8f, 0xb0, 0xa4, 0xc1, 0xd4, 0xd4, 0xaa, 0xfb, 0x1f, 0x12, 0x35, - 0xb5, 0xea, 0x4f, 0xce, 0x09, 0xf3, 0x5f, 0x59, 0x9f, 0xb5, 0xbb, 0x3c, 0x15, 0x3a, 0x5b, 0x21, - 0xd9, 0x72, 0xef, 0xa4, 0x85, 0xce, 0x2a, 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x82, 0xb1, 0xba, 0xd4, - 0xee, 0x26, 0x6c, 0xe0, 0xb1, 0xae, 0xba, 0x72, 0xf5, 0xac, 0xc2, 0x6d, 0xc2, 0x96, 0xb5, 0xfa, - 0xd8, 0xa0, 0x66, 0x3e, 0xcc, 0x17, 0x7b, 0x3d, 0xcc, 0x27, 0x74, 0x73, 0x9f, 0x96, 0xed, 0x5f, - 0xb1, 0x60, 0x88, 0xeb, 0x08, 0xfb, 0x53, 0xaa, 0x6a, 0xcf, 0x44, 0xc9, 0xd8, 0xdd, 0xa4, 0x85, - 0xe2, 0xd5, 0x08, 0xad, 0x43, 0x89, 0xfd, 0x60, 0xba, 0x92, 0x62, 0xbe, 0x31, 0x1c, 0x6f, 0x55, - 0xef, 0xe0, 0x4d, 0x59, 0x0d, 0x27, 0x14, 0xec, 0x2f, 0x17, 0x29, 0xab, 0x4a, 0x50, 0x8d, 0x13, - 0xdc, 0xba, 0x7f, 0x27, 0x78, 0xe1, 0x7e, 0x9d, 0xe0, 0x4d, 0x98, 0xac, 0x6b, 0x6f, 0x52, 0xc9, - 0x4c, 0x5e, 0xe8, 
-	// [old gzipped FileDescriptorProto hex bytes omitted]
+	// 12346 bytes of a gzipped FileDescriptorProto
+	// [regenerated gzipped FileDescriptorProto hex bytes omitted]
0x91, 0xd0, 0x25, 0x91, 0x78, 0x76, 0xe8, 0x31, 0x8d, 0x0c, 0x8d, 0x9b, 0x90, 0xf2, 0xdf, 0x58, + 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0xb7, 0x21, 0xf6, 0xd2, 0xa0, 0x2d, 0xaf, 0x45, 0x56, 0x8a, 0x05, + 0x14, 0xbd, 0x0d, 0xa3, 0x21, 0xf1, 0x98, 0x02, 0x6e, 0x72, 0xf0, 0x45, 0xce, 0xf5, 0x79, 0xbc, + 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x21, 0xa1, 0x32, 0x84, 0xeb, 0x6f, 0x2b, 0x03, 0x0a, 0xf1, + 0x7e, 0xf0, 0x84, 0x68, 0xff, 0x1c, 0x4e, 0x30, 0xfc, 0x38, 0x0c, 0x3c, 0x8f, 0x84, 0x38, 0xa3, + 0x1a, 0xba, 0x0e, 0xb3, 0xaa, 0xb4, 0xe6, 0x47, 0xb1, 0xe3, 0x37, 0x09, 0x7b, 0x3a, 0x28, 0x25, + 0x52, 0x11, 0x4e, 0x23, 0xe0, 0xee, 0x3a, 0xf6, 0xd7, 0xa9, 0x38, 0x43, 0x47, 0xeb, 0x0c, 0x64, + 0x81, 0xb7, 0x4c, 0x59, 0xe0, 0x62, 0xee, 0xcc, 0xe5, 0xc8, 0x01, 0x47, 0x16, 0x8c, 0x6b, 0x33, + 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed, 0xc0, 0x0c, 0x5d, 0xe9, 0xb7, 0x37, 0x99, 0x37, 0x45, + 0x8b, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d, 0xcc, 0xb2, 0x68, 0x60, 0xe6, 0x56, 0x8a, 0x20, 0xee, 0x6a, + 0x02, 0xbd, 0x21, 0xb5, 0x51, 0x45, 0xc3, 0x30, 0x8a, 0x6b, 0x9a, 0x8e, 0x0f, 0x2b, 0x33, 0xda, + 0x87, 0xe8, 0xda, 0x27, 0xfb, 0xab, 0xf2, 0x1b, 0x39, 0xb3, 0x59, 0x80, 0x52, 0x53, 0x2d, 0x16, + 0xcb, 0xb4, 0xa5, 0x55, 0xcb, 0x01, 0x27, 0x38, 0x74, 0x8f, 0xd2, 0x2b, 0x48, 0xda, 0x96, 0x8f, + 0x5e, 0x50, 0x30, 0x83, 0xd8, 0xaf, 0x02, 0xac, 0x3c, 0x20, 0x4d, 0xbe, 0xd4, 0xf5, 0x47, 0x5d, + 0x2b, 0xff, 0x51, 0xd7, 0xfe, 0x8f, 0x16, 0x4c, 0xad, 0x2e, 0x1b, 0xd7, 0xc4, 0x79, 0x00, 0x7e, + 0x37, 0xba, 0x77, 0x6f, 0x5d, 0xbe, 0x57, 0x70, 0x95, 0xb3, 0x2a, 0xc5, 0x1a, 0x06, 0xba, 0x08, + 0x45, 0xaf, 0xe3, 0x8b, 0x2b, 0xcb, 0xe8, 0xd1, 0x61, 0xa5, 0x78, 0xab, 0xe3, 0x63, 0x5a, 0xa6, + 0x99, 0xcf, 0x15, 0x07, 0x36, 0x9f, 0xeb, 0xeb, 0x25, 0x81, 0x2a, 0x30, 0x7c, 0xff, 0xbe, 0xdb, + 0x8a, 0xca, 0xc3, 0xc9, 0x5b, 0xca, 0xbd, 0x7b, 0xb5, 0x6a, 0x84, 0x79, 0xb9, 0xfd, 0x97, 0x8a, + 0x30, 0xb3, 0xea, 0x91, 0x07, 0x0f, 0x65, 0x85, 0x3b, 0xa8, 0xc9, 0xdf, 0x9d, 0x6e, 0x29, 0xf1, + 0xb4, 0x8d, 0x1c, 0xfb, 0x0f, 0xc5, 0x7b, 0x30, 0xca, 0x6d, 0x03, 0xf8, 0x60, 0x8c, 0x5f, 0x7d, + 0x25, 0xab, 0x0b, 0xe9, 0xb1, 0x98, 0x17, 0xea, 0x38, 0x6e, 0x28, 0xa5, 0x8e, 0x56, 0x51, 0x8a, + 0x25, 0xc9, 0xb9, 0xcf, 0xc3, 0x84, 0x8e, 0x79, 0x22, 0x8b, 0xa9, 0xbf, 0x6c, 0xc1, 0xb9, 0x55, + 0x2f, 0x68, 0xee, 0xa6, 0xec, 0x2f, 0x5f, 0x87, 0x71, 0xca, 0x34, 0x22, 0xc3, 0xf2, 0xdc, 0xf0, + 0x45, 0x10, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0xee, 0xdc, 0xa9, 0x55, 0xb3, 0x5c, 0x18, 0x04, 0x08, + 0xeb, 0x78, 0xf6, 0x6f, 0x58, 0xf0, 0xd4, 0xf5, 0xe5, 0x95, 0xc4, 0x04, 0xb9, 0xcb, 0x8b, 0x82, + 0x5e, 0x39, 0x5a, 0x5a, 0x57, 0x92, 0x2b, 0x47, 0x95, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x1e, 0x42, + 0x3f, 0x69, 0xc1, 0xb9, 0xeb, 0x6e, 0x4c, 0xcf, 0x80, 0xb4, 0x3d, 0x3f, 0x3d, 0x04, 0x22, 0x37, + 0x0e, 0xc2, 0x83, 0xb4, 0x3d, 0x3f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0xdd, 0x88, 0xf6, + 0xb4, 0x60, 0xea, 0x3d, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x7e, 0x58, 0xcb, 0x0d, 0x99, 0xdc, 0x7a, + 0x20, 0xb6, 0xb3, 0xfa, 0xb0, 0xaa, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x51, 0x0b, 0x2e, 0x5c, 0xf7, + 0x3a, 0x51, 0x4c, 0xc2, 0xad, 0xc8, 0xe8, 0xec, 0xab, 0x50, 0x22, 0xf2, 0x6e, 0x28, 0xfa, 0xaa, + 0xa4, 0x19, 0x75, 0x69, 0xe4, 0xce, 0x04, 0x0a, 0x6f, 0x00, 0x63, 0xe6, 0x93, 0x19, 0xe1, 0xfe, + 0x4c, 0x01, 0x26, 0x6f, 0x6c, 0x6c, 0xd4, 0xaf, 0x93, 0x58, 0xb0, 0xcc, 0xfe, 0x7a, 0x4d, 0xac, + 0xa9, 0x67, 0x7a, 0x49, 0xe0, 0x9d, 0xd8, 0xf5, 0xe6, 0xb9, 0xf7, 0xda, 0x7c, 0xcd, 0x8f, 0x6f, + 0x87, 0x8d, 0x38, 0x74, 0xfd, 0xed, 0x4c, 0x85, 0x8e, 0x64, 0xec, 0xc5, 0x3c, 0xc6, 0x8e, 0x5e, + 0x85, 0x11, 0xe6, 0x3e, 
0x27, 0x65, 0xe1, 0x27, 0x94, 0x00, 0xcb, 0x4a, 0x8f, 0x0f, 0x2b, 0xa5, + 0x3b, 0xb8, 0xc6, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0, 0xf8, 0x4e, 0x1c, 0xb7, 0x6f, 0x10, 0xa7, + 0x45, 0x42, 0xc9, 0x1d, 0x2e, 0x65, 0x71, 0x07, 0x3a, 0x08, 0x1c, 0x2d, 0xd9, 0x50, 0x49, 0x59, + 0x84, 0x75, 0x3a, 0x76, 0x03, 0x20, 0x81, 0x9d, 0xd2, 0x65, 0xd6, 0xfe, 0x7d, 0x0b, 0x46, 0xb9, + 0x27, 0x43, 0x88, 0xbe, 0x00, 0x43, 0xe4, 0x01, 0x69, 0x0a, 0x31, 0x25, 0xb3, 0xc3, 0xc9, 0x29, + 0xc7, 0x55, 0xb3, 0xf4, 0x3f, 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa5, 0xbd, 0xbd, 0xae, 0xdc, 0x3a, + 0x9e, 0xce, 0xfb, 0x62, 0x35, 0xed, 0xfc, 0x60, 0x14, 0x45, 0x58, 0x56, 0x67, 0x6a, 0xc6, 0x66, + 0xbb, 0x41, 0x19, 0x58, 0xdc, 0x4b, 0x09, 0xb0, 0xb1, 0x5c, 0xe7, 0x48, 0x82, 0x1a, 0x57, 0x33, + 0xca, 0x42, 0x9c, 0x10, 0xb1, 0x37, 0xa0, 0x44, 0x27, 0x75, 0xd1, 0x73, 0x9d, 0xde, 0x1a, 0xce, + 0x17, 0xa1, 0x24, 0xb5, 0x8d, 0x91, 0xf0, 0xb5, 0x60, 0x54, 0xa5, 0x32, 0x32, 0xc2, 0x09, 0xdc, + 0xde, 0x82, 0xf3, 0xec, 0xe9, 0xde, 0x89, 0x77, 0x8c, 0x3d, 0xd6, 0x7f, 0x31, 0xbf, 0x24, 0xa4, + 0x7e, 0x3e, 0x33, 0x65, 0xcd, 0x38, 0x7c, 0x42, 0x52, 0x4c, 0x6e, 0x00, 0xf6, 0x1f, 0x0e, 0xc1, + 0x13, 0xb5, 0x46, 0xbe, 0x93, 0xcb, 0x35, 0x98, 0xe0, 0x32, 0x01, 0x5d, 0xda, 0x8e, 0x27, 0xda, + 0x55, 0x0f, 0x5b, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x29, 0x28, 0xba, 0x1f, 0xf8, 0x69, 0x4b, + 0xd0, 0xda, 0x3b, 0xeb, 0x98, 0x96, 0x53, 0x30, 0x15, 0x2f, 0x38, 0x2b, 0x55, 0x60, 0x25, 0x62, + 0xbc, 0x05, 0x53, 0x6e, 0xd4, 0x8c, 0xdc, 0x9a, 0x4f, 0xf9, 0x4c, 0xe2, 0x20, 0x95, 0xdc, 0x48, + 0x69, 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0x35, 0xbe, 0x3e, 0x3c, 0xb0, 0x88, 0xd2, 0xd7, 0xf9, 0x80, + 0x4a, 0x5f, 0x6d, 0xf6, 0x75, 0x11, 0xb3, 0x4a, 0x13, 0xd2, 0x17, 0xff, 0xe0, 0x08, 0x4b, 0x18, + 0x15, 0xf7, 0x9b, 0x3b, 0x4e, 0x7b, 0xb1, 0x13, 0xef, 0x54, 0xdd, 0xa8, 0x19, 0xec, 0x93, 0xf0, + 0x80, 0xdd, 0xd4, 0xc6, 0x12, 0x71, 0x5f, 0x01, 0x96, 0x6f, 0x2c, 0xd6, 0x29, 0x26, 0xee, 0xae, + 0x63, 0xaa, 0xac, 0xe0, 0x34, 0x5c, 0x55, 0x16, 0x61, 0x5a, 0x36, 0xd3, 0x20, 0x11, 0x3b, 0x23, + 0xc6, 0x59, 0xc7, 0x94, 0xeb, 0xa2, 0x28, 0x56, 0xdd, 0x4a, 0xe3, 0xa3, 0x37, 0x60, 0xd2, 0xf5, + 0xdd, 0xd8, 0x75, 0xe2, 0x20, 0x64, 0x27, 0x2c, 0xbf, 0x94, 0xb1, 0x17, 0xb8, 0x9a, 0x0e, 0xc0, + 0x26, 0x9e, 0xfd, 0x07, 0x43, 0x30, 0xcb, 0xa6, 0xed, 0x9b, 0x2b, 0xec, 0x13, 0xb3, 0xc2, 0xee, + 0x74, 0xaf, 0xb0, 0xd3, 0x10, 0x77, 0x3f, 0xce, 0x65, 0xf6, 0x3e, 0x94, 0x94, 0x31, 0xaf, 0xb4, + 0x47, 0xb7, 0x72, 0xec, 0xd1, 0xfb, 0x4b, 0x1f, 0xf2, 0xcd, 0xb0, 0x98, 0xf9, 0x66, 0xf8, 0x77, + 0x2c, 0x48, 0x6c, 0x1a, 0xd1, 0x0d, 0x28, 0xb5, 0x03, 0x66, 0x37, 0x10, 0x4a, 0x63, 0x9c, 0x27, + 0x32, 0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0xf1, 0xab, 0xcb, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, + 0x1d, 0x92, 0x46, 0xcc, 0xbc, 0xf2, 0xfa, 0xd2, 0xe1, 0x6b, 0x84, 0xe3, 0x63, 0x59, 0xd1, 0xfe, + 0x59, 0x0b, 0x80, 0x3f, 0xcb, 0x39, 0xfe, 0x36, 0x39, 0x03, 0x55, 0x63, 0x15, 0x86, 0xa2, 0x36, + 0x69, 0xf6, 0xb2, 0xe8, 0x48, 0xfa, 0xd3, 0x68, 0x93, 0x66, 0x32, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, + 0xed, 0xef, 0x01, 0x98, 0x4a, 0xd0, 0x6a, 0x31, 0xd9, 0x43, 0x2f, 0x1b, 0x3e, 0x4f, 0x17, 0x53, + 0x3e, 0x4f, 0x25, 0x86, 0xad, 0x69, 0xb5, 0xde, 0x87, 0xe2, 0x9e, 0xf3, 0x40, 0xa8, 0x2d, 0x5e, + 0xec, 0xdd, 0x0d, 0x4a, 0x7f, 0x7e, 0xcd, 0x79, 0xc0, 0xef, 0x4c, 0x2f, 0xca, 0x05, 0xb2, 0xe6, + 0x3c, 0x38, 0xe6, 0x76, 0x1b, 0x8c, 0x49, 0xdd, 0x72, 0xa3, 0xf8, 0x6b, 0xff, 0x25, 0xf9, 0xcf, + 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0xfa, 0xe2, 0x91, 0x6a, 0xa0, 0xb6, 0x5c, 0x3f, 0xdd, 0x96, + 0xeb, 0x0f, 0xd0, 0x96, 0xeb, 0xa3, 0x0f, 0x61, 
0x54, 0x3c, 0x08, 0x33, 0x63, 0x6d, 0x53, 0x25, + 0x92, 0xd7, 0x9e, 0x78, 0x4f, 0xe6, 0x6d, 0x2e, 0xc8, 0x3b, 0xa1, 0x28, 0xed, 0xdb, 0xae, 0x6c, + 0x10, 0xfd, 0x2d, 0x0b, 0xa6, 0xc4, 0x6f, 0x4c, 0x3e, 0xe8, 0x90, 0x28, 0x16, 0xb2, 0xe7, 0xe7, + 0x06, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, 0x39, 0xc9, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0xa5, 0x7a, + 0x81, 0xfe, 0x89, 0x05, 0xe7, 0xf7, 0x9c, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xd8, 0x0d, 0x84, + 0xf1, 0xf9, 0x17, 0x06, 0x9b, 0xfe, 0xae, 0xea, 0xbc, 0x93, 0xd2, 0x4e, 0xf5, 0x7c, 0x16, 0x4a, + 0xdf, 0xae, 0x66, 0xf6, 0x6b, 0x6e, 0x0b, 0xc6, 0xe4, 0x7a, 0xcb, 0xb8, 0x79, 0x57, 0x75, 0xc1, + 0xfa, 0xc4, 0xef, 0xf1, 0xda, 0x4d, 0x9d, 0xb5, 0x23, 0xd6, 0xda, 0x23, 0x6d, 0xe7, 0x7d, 0x98, + 0xd0, 0xd7, 0xd8, 0x23, 0x6d, 0xeb, 0x03, 0x38, 0x97, 0xb1, 0x96, 0x1e, 0x69, 0x93, 0xf7, 0xe1, + 0x62, 0xee, 0xfa, 0x78, 0x94, 0x0d, 0xdb, 0x3f, 0x63, 0xe9, 0x7c, 0xf0, 0x0c, 0xf4, 0xbd, 0xcb, + 0xa6, 0xbe, 0xf7, 0x52, 0xef, 0x9d, 0x93, 0xa3, 0xf4, 0x7d, 0x4f, 0xef, 0x34, 0xe5, 0xea, 0xe8, + 0x6d, 0x18, 0xf1, 0x68, 0x89, 0xb4, 0x44, 0xb0, 0xfb, 0xef, 0xc8, 0x44, 0x96, 0x62, 0xe5, 0x11, + 0x16, 0x14, 0xec, 0x5f, 0xb0, 0x60, 0xe8, 0x0c, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x72, 0x2e, 0x69, + 0x11, 0x06, 0x67, 0x1e, 0x3b, 0xf7, 0x57, 0x64, 0xa8, 0x9f, 0x9c, 0x81, 0xf9, 0xbf, 0x05, 0x18, + 0xa7, 0x4d, 0x49, 0x93, 0xb9, 0x37, 0x61, 0xd2, 0x73, 0x36, 0x89, 0x27, 0x1f, 0x0d, 0xd3, 0x0a, + 0x93, 0x5b, 0x3a, 0x10, 0x9b, 0xb8, 0xb4, 0xf2, 0x96, 0xfe, 0x7e, 0x2a, 0xe4, 0x17, 0x55, 0xd9, + 0x78, 0x5c, 0xc5, 0x26, 0x2e, 0xbd, 0xbb, 0xdf, 0x77, 0xe2, 0xe6, 0x8e, 0x50, 0xa6, 0xa8, 0xee, + 0xde, 0xa3, 0x85, 0x98, 0xc3, 0xa8, 0x00, 0x27, 0x57, 0xe7, 0x5d, 0x7a, 0x33, 0x0c, 0x7c, 0x21, + 0x1e, 0x2b, 0x01, 0x0e, 0x9b, 0x60, 0x9c, 0xc6, 0xcf, 0x70, 0x7e, 0x1e, 0x66, 0x06, 0x81, 0x03, + 0x38, 0x3f, 0xa3, 0x3a, 0x9c, 0x77, 0xfd, 0xa6, 0xd7, 0x69, 0x91, 0x3b, 0x3e, 0x97, 0xee, 0x3c, + 0xf7, 0x43, 0xd2, 0x12, 0x02, 0xb4, 0xb2, 0xdd, 0xac, 0x65, 0xe0, 0xe0, 0xcc, 0x9a, 0xf6, 0x5f, + 0x80, 0x73, 0xb7, 0x02, 0xa7, 0xb5, 0xe4, 0x78, 0x8e, 0xdf, 0x24, 0x61, 0xcd, 0xdf, 0xee, 0x6b, + 0x92, 0xa4, 0x1b, 0x10, 0x15, 0xfa, 0x19, 0x10, 0xd9, 0x3b, 0x80, 0xf4, 0x06, 0x84, 0x21, 0x2c, + 0x86, 0x51, 0x97, 0x37, 0x25, 0x96, 0xff, 0x73, 0xd9, 0xd2, 0x75, 0x57, 0xcf, 0x34, 0x13, 0x4f, + 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x1a, 0x64, 0x3a, 0xbf, 0xf5, 0x57, 0xdb, 0xd8, 0xaf, 0xc3, 0x2c, + 0xab, 0x79, 0x32, 0x95, 0x82, 0xfd, 0xd7, 0x2c, 0x98, 0x5e, 0x4f, 0x85, 0x2b, 0x78, 0x96, 0x3d, + 0xec, 0x65, 0xe8, 0xdd, 0x1b, 0xac, 0x14, 0x0b, 0xe8, 0xa9, 0xeb, 0xf7, 0xfe, 0xd4, 0x82, 0x92, + 0x8a, 0x84, 0x72, 0x06, 0x42, 0xed, 0xb2, 0x21, 0xd4, 0x66, 0xea, 0x9d, 0x54, 0x77, 0xf2, 0x64, + 0x5a, 0x74, 0x53, 0x39, 0xde, 0xf7, 0x50, 0x39, 0x25, 0x64, 0xb8, 0x9b, 0xf6, 0x94, 0xe9, 0x9d, + 0x2f, 0x5d, 0xf1, 0x99, 0x4d, 0x90, 0xc2, 0xfd, 0x84, 0xd8, 0x04, 0xa9, 0xfe, 0xe4, 0x70, 0xbf, + 0xba, 0xd6, 0x65, 0x76, 0x2a, 0x7c, 0x2b, 0xb3, 0x9b, 0x67, 0x7b, 0x53, 0xc5, 0xbb, 0xa8, 0x08, + 0x3b, 0x78, 0x51, 0x7a, 0xcc, 0x18, 0x99, 0xf8, 0xc7, 0xa3, 0xd6, 0x24, 0x55, 0xec, 0x1b, 0x30, + 0x9d, 0x1a, 0x30, 0xf4, 0x3a, 0x0c, 0xb7, 0x77, 0x9c, 0x88, 0xa4, 0xec, 0x20, 0x87, 0xeb, 0xb4, + 0xf0, 0xf8, 0xb0, 0x32, 0xa5, 0x2a, 0xb0, 0x12, 0xcc, 0xb1, 0xed, 0xff, 0x69, 0xc1, 0xd0, 0x7a, + 0xd0, 0x3a, 0x8b, 0xc5, 0xf4, 0x96, 0xb1, 0x98, 0x9e, 0xcc, 0x8b, 0xf9, 0x95, 0xbb, 0x8e, 0x56, + 0x53, 0xeb, 0xe8, 0x52, 0x2e, 0x85, 0xde, 0x4b, 0x68, 0x0f, 0xc6, 0x59, 0x24, 0x31, 0x61, 0x97, + 0xf9, 0xaa, 0x71, 0xbf, 0xaa, 0xa4, 0xee, 0x57, 0xd3, 0x1a, 0xaa, 0x76, 
0xcb, 0x7a, 0x1e, 0x46, + 0x85, 0x6d, 0x60, 0xda, 0x43, 0x40, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xac, 0x08, 0x46, 0xe4, 0x32, + 0xf4, 0xcb, 0x16, 0xcc, 0x87, 0xdc, 0xe5, 0xb2, 0x55, 0xed, 0x84, 0xae, 0xbf, 0xdd, 0x68, 0xee, + 0x90, 0x56, 0xc7, 0x73, 0xfd, 0xed, 0xda, 0xb6, 0x1f, 0xa8, 0xe2, 0x95, 0x07, 0xa4, 0xd9, 0x61, + 0x6f, 0x2e, 0x7d, 0xc2, 0xa4, 0x29, 0xdb, 0x9b, 0xab, 0x47, 0x87, 0x95, 0x79, 0x7c, 0x22, 0xda, + 0xf8, 0x84, 0x7d, 0x41, 0xbf, 0x61, 0xc1, 0x02, 0x0f, 0xe8, 0x35, 0x78, 0xff, 0x7b, 0xdc, 0x46, + 0xeb, 0x92, 0x54, 0x42, 0x64, 0x83, 0x84, 0x7b, 0x4b, 0x6f, 0x88, 0x01, 0x5d, 0xa8, 0x9f, 0xac, + 0x2d, 0x7c, 0xd2, 0xce, 0xd9, 0xff, 0xa6, 0x08, 0x93, 0x74, 0x14, 0x93, 0x30, 0x23, 0xaf, 0x1b, + 0x4b, 0xe2, 0xe9, 0xd4, 0x92, 0x98, 0x35, 0x90, 0x4f, 0x27, 0xc2, 0x48, 0x04, 0xb3, 0x9e, 0x13, + 0xc5, 0x37, 0x88, 0x13, 0xc6, 0x9b, 0xc4, 0xe1, 0x36, 0x29, 0xc5, 0x13, 0xdb, 0xcf, 0x28, 0xf5, + 0xd7, 0xad, 0x34, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x03, 0x62, 0x86, 0x35, 0xa1, 0xe3, 0x47, 0xfc, + 0x5b, 0x5c, 0xf1, 0x1e, 0x73, 0xb2, 0x56, 0xe7, 0x44, 0xab, 0xe8, 0x56, 0x17, 0x35, 0x9c, 0xd1, + 0x82, 0x66, 0x30, 0x35, 0x3c, 0xa8, 0xc1, 0xd4, 0x48, 0x1f, 0x37, 0x9c, 0x3d, 0x98, 0x11, 0xb3, + 0xb2, 0xe5, 0x6e, 0x8b, 0x43, 0xfa, 0xcb, 0x29, 0x83, 0x4a, 0x6b, 0x70, 0xab, 0x98, 0x3e, 0xd6, + 0x94, 0xf6, 0x77, 0xc2, 0x39, 0xda, 0x9c, 0xe9, 0x34, 0x12, 0x21, 0x02, 0xd3, 0xbb, 0x9d, 0x4d, + 0xe2, 0x91, 0x58, 0x96, 0x89, 0x46, 0x33, 0xc5, 0x7e, 0xb3, 0x76, 0x22, 0x5b, 0xde, 0x34, 0x49, + 0xe0, 0x34, 0x4d, 0xfb, 0x27, 0x2c, 0x60, 0xa6, 0xd9, 0x67, 0x70, 0xfc, 0x7d, 0xd1, 0x3c, 0xfe, + 0xca, 0x79, 0x1c, 0x28, 0xe7, 0xe4, 0x7b, 0x8d, 0x4f, 0x4b, 0x3d, 0x0c, 0x1e, 0x1c, 0x48, 0xd9, + 0xbf, 0xbf, 0xc4, 0xf5, 0x7f, 0x2c, 0xbe, 0x21, 0x95, 0x07, 0x3a, 0xfa, 0x2e, 0x18, 0x6b, 0x3a, + 0x6d, 0xa7, 0xc9, 0x43, 0x46, 0xe6, 0x6a, 0x7f, 0x8c, 0x4a, 0xf3, 0xcb, 0xa2, 0x06, 0xd7, 0x66, + 0x7c, 0x56, 0x7e, 0xa5, 0x2c, 0xee, 0xab, 0xc1, 0x50, 0x4d, 0xce, 0xed, 0xc2, 0xa4, 0x41, 0xec, + 0x91, 0x5e, 0x7d, 0xbf, 0x8b, 0x1f, 0x17, 0xea, 0xc6, 0xb2, 0x07, 0xb3, 0xbe, 0xf6, 0x9f, 0x32, + 0x47, 0x29, 0x4e, 0x7f, 0xba, 0xdf, 0x81, 0xc0, 0x38, 0xa9, 0x66, 0x7a, 0x9e, 0x22, 0x83, 0xbb, + 0x29, 0xdb, 0x7f, 0xcf, 0x82, 0xc7, 0x75, 0x44, 0x2d, 0x38, 0x40, 0x3f, 0x7d, 0x72, 0x15, 0xc6, + 0x82, 0x36, 0x09, 0x9d, 0xe4, 0x4e, 0x76, 0x45, 0x0e, 0xfa, 0x6d, 0x51, 0x7e, 0x7c, 0x58, 0x39, + 0xaf, 0x53, 0x97, 0xe5, 0x58, 0xd5, 0x44, 0x36, 0x8c, 0xb0, 0xc1, 0x88, 0x44, 0xe0, 0x06, 0x66, + 0x13, 0xc7, 0x9e, 0x56, 0x23, 0x2c, 0x20, 0xf6, 0xf7, 0x58, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x1f, + 0xc0, 0xcc, 0x1e, 0xbd, 0xbe, 0xad, 0x3c, 0x68, 0x87, 0x5c, 0x8d, 0x2e, 0xc7, 0xe9, 0xc5, 0x7e, + 0xe3, 0xa4, 0x7d, 0x64, 0x62, 0x39, 0xb5, 0x96, 0x22, 0x86, 0xbb, 0xc8, 0xdb, 0x7f, 0x5c, 0xe0, + 0x3b, 0x91, 0x49, 0x75, 0xcf, 0xc3, 0x68, 0x3b, 0x68, 0x2d, 0xd7, 0xaa, 0x58, 0x8c, 0x90, 0x62, + 0x57, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x05, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x8e, 0xa7, 0x0c, + 0x3f, 0x94, 0xf0, 0xb4, 0xa2, 0x20, 0x58, 0xc3, 0xa2, 0x75, 0xda, 0x61, 0xb0, 0xef, 0xb6, 0x98, + 0x6b, 0x5b, 0xd1, 0xac, 0x53, 0x57, 0x10, 0xac, 0x61, 0xd1, 0xab, 0x72, 0xc7, 0x8f, 0xf8, 0x01, + 0xe8, 0x6c, 0x8a, 0x58, 0x67, 0x63, 0xc9, 0x55, 0xf9, 0x8e, 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x84, + 0x91, 0xd8, 0x61, 0xe6, 0x0c, 0xc3, 0xf9, 0x36, 0x70, 0x1b, 0x14, 0x43, 0x8f, 0x21, 0x48, 0x2b, + 0x60, 0x51, 0x11, 0xbd, 0x2b, 0x59, 0x30, 0x67, 0xc9, 0xc2, 0xf8, 0x34, 0x77, 0xd9, 0xea, 0xec, + 0x5b, 0xe7, 0xc1, 0xc2, 0xa8, 0xd5, 0xa0, 0x65, 0x7f, 0x77, 0x09, 0x20, 0x91, 0xf6, 0xd0, 0x87, + 
0x5d, 0x2c, 0xe2, 0xa5, 0xde, 0xf2, 0xe1, 0xe9, 0xf1, 0x07, 0xf4, 0xbd, 0x16, 0x8c, 0x3b, 0x9e, + 0x17, 0x34, 0x9d, 0x98, 0x8d, 0x72, 0xa1, 0x37, 0x8b, 0x12, 0xed, 0x2f, 0x26, 0x35, 0x78, 0x17, + 0x5e, 0x95, 0x96, 0x0a, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0x67, 0xe5, 0x25, 0x80, 0x2f, + 0x8f, 0xb9, 0xf4, 0x25, 0xa0, 0xc4, 0xb8, 0xb1, 0x26, 0xff, 0xa3, 0x3b, 0x46, 0x50, 0xb0, 0xa1, + 0xfc, 0xf8, 0x07, 0x86, 0xd0, 0xd3, 0x2f, 0x1e, 0x18, 0xaa, 0xeb, 0x4e, 0x38, 0xc3, 0xf9, 0x41, + 0x42, 0x34, 0xe9, 0xba, 0x8f, 0x03, 0xce, 0xfb, 0x30, 0xdd, 0x32, 0x8f, 0x5b, 0xb1, 0x9a, 0x9e, + 0xcb, 0xa3, 0x9b, 0x3a, 0x9d, 0x93, 0x03, 0x36, 0x05, 0xc0, 0x69, 0xc2, 0xa8, 0xce, 0xdd, 0xa1, + 0x6a, 0xfe, 0x56, 0x20, 0x8c, 0x98, 0xed, 0xdc, 0xb9, 0x3c, 0x88, 0x62, 0xb2, 0x47, 0x31, 0x93, + 0x73, 0x74, 0x5d, 0xd4, 0xc5, 0x8a, 0x0a, 0x7a, 0x1b, 0x46, 0x98, 0x8f, 0x6a, 0x54, 0x1e, 0xcb, + 0xd7, 0x03, 0x9a, 0xe1, 0x15, 0x92, 0x4d, 0xc5, 0xfe, 0x46, 0x58, 0x50, 0x40, 0x37, 0x64, 0x0c, + 0x96, 0xa8, 0xe6, 0xdf, 0x89, 0x08, 0x8b, 0xc1, 0x52, 0x5a, 0xfa, 0x74, 0x12, 0x5e, 0x85, 0x97, + 0x67, 0x46, 0x0b, 0x36, 0x6a, 0x52, 0x79, 0x45, 0xfc, 0x97, 0x41, 0x88, 0xcb, 0x90, 0xdf, 0x3d, + 0x33, 0x50, 0x71, 0x32, 0x9c, 0x77, 0x4d, 0x12, 0x38, 0x4d, 0xf3, 0x4c, 0x8f, 0xcf, 0x39, 0x1f, + 0x66, 0xd2, 0x1b, 0xeb, 0x91, 0x1e, 0xd7, 0xbf, 0x3f, 0x04, 0x53, 0xe6, 0x42, 0x40, 0x0b, 0x50, + 0x12, 0x44, 0x54, 0x3c, 0x46, 0xb5, 0xb6, 0xd7, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x1e, 0x25, 0xab, + 0xae, 0xd9, 0x01, 0x26, 0xf1, 0x28, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x88, 0xde, 0x0c, 0x82, 0x58, + 0x1d, 0x05, 0x6a, 0xb5, 0x2c, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x08, 0xd8, 0x25, 0xa1, 0x4f, 0x3c, + 0x53, 0x93, 0xa9, 0x8e, 0x80, 0x9b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x48, 0x0b, 0x22, 0xb6, 0xfc, + 0x84, 0xa8, 0x9e, 0xd8, 0x55, 0x36, 0xb8, 0x8f, 0xb6, 0x84, 0xa3, 0x2f, 0xc3, 0xe3, 0xca, 0xa5, + 0x1a, 0x73, 0xcd, 0xb0, 0x6c, 0x71, 0xc4, 0xb8, 0x59, 0x3f, 0xbe, 0x9c, 0x8d, 0x86, 0xf3, 0xea, + 0xa3, 0xb7, 0x60, 0x4a, 0x88, 0xc0, 0x92, 0xe2, 0xa8, 0x69, 0xac, 0x70, 0xd3, 0x80, 0xe2, 0x14, + 0x36, 0xaa, 0xc2, 0x0c, 0x2d, 0x61, 0x52, 0xa8, 0xa4, 0xc0, 0x5d, 0xc3, 0xd5, 0x59, 0x7f, 0x33, + 0x05, 0xc7, 0x5d, 0x35, 0xd0, 0x22, 0x4c, 0x73, 0x19, 0x85, 0xde, 0x29, 0xd9, 0x3c, 0x08, 0xdf, + 0x02, 0xb5, 0x11, 0x6e, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0xd7, 0x60, 0xc2, 0x09, 0x9b, 0x3b, 0x6e, + 0x4c, 0x9a, 0x71, 0x27, 0xe4, 0x4e, 0x07, 0x9a, 0xb5, 0xc7, 0xa2, 0x06, 0xc3, 0x06, 0xa6, 0xfd, + 0x21, 0x9c, 0xcb, 0x70, 0x4b, 0xa2, 0x0b, 0xc7, 0x69, 0xbb, 0xf2, 0x9b, 0x52, 0x16, 0x92, 0x8b, + 0xf5, 0x9a, 0xfc, 0x1a, 0x0d, 0x8b, 0xae, 0x4e, 0xa6, 0x12, 0xd7, 0x22, 0x85, 0xab, 0xd5, 0xb9, + 0x2a, 0x01, 0x38, 0xc1, 0xb1, 0x7f, 0x1d, 0x40, 0x53, 0xe8, 0x0c, 0x60, 0x1f, 0x77, 0x0d, 0x26, + 0x64, 0x78, 0x7b, 0x2d, 0xac, 0xb2, 0xfa, 0xcc, 0xeb, 0x1a, 0x0c, 0x1b, 0x98, 0xb4, 0x6f, 0xbe, + 0x0a, 0x0a, 0x9d, 0xb2, 0xc7, 0x4c, 0x42, 0x42, 0x27, 0x38, 0xe8, 0x25, 0x18, 0x8b, 0x88, 0xb7, + 0x75, 0xcb, 0xf5, 0x77, 0xc5, 0xc2, 0x56, 0x5c, 0xb8, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x50, + 0xec, 0xb8, 0x2d, 0xb1, 0x94, 0xe5, 0x81, 0x5f, 0xbc, 0x53, 0xab, 0x1e, 0x1f, 0x56, 0x9e, 0xce, + 0x8b, 0xda, 0x4f, 0xaf, 0xf6, 0xd1, 0x3c, 0xdd, 0x7e, 0xb4, 0x72, 0xd6, 0xdb, 0xc0, 0xc8, 0x09, + 0xdf, 0x06, 0xae, 0x02, 0x88, 0xaf, 0x96, 0x6b, 0xb9, 0x98, 0xcc, 0xda, 0x75, 0x05, 0xc1, 0x1a, + 0x16, 0x8a, 0x60, 0xb6, 0x19, 0x12, 0x47, 0xde, 0xa1, 0xb9, 0x83, 0xcd, 0xd8, 0xc3, 0x2b, 0x08, + 0x96, 0xd3, 0xc4, 0x70, 0x37, 0x7d, 0x14, 0xc0, 0x6c, 0x4b, 0x78, 0xf0, 0x27, 0x8d, 0x96, 0x4e, + 0xee, 0xd5, 0xc3, 0x0c, 
0x72, 0xd2, 0x84, 0x70, 0x37, 0x6d, 0xf4, 0x15, 0x98, 0x93, 0x85, 0xdd, + 0x41, 0x13, 0xd8, 0x76, 0x29, 0x2e, 0x5d, 0x3a, 0x3a, 0xac, 0xcc, 0x55, 0x73, 0xb1, 0x70, 0x0f, + 0x0a, 0x08, 0xc3, 0x08, 0x7b, 0x4b, 0x8a, 0xca, 0xe3, 0xec, 0x9c, 0x7b, 0x21, 0x5f, 0x19, 0x40, + 0xd7, 0xfa, 0x3c, 0x7b, 0x87, 0x12, 0x26, 0xe5, 0xc9, 0xb3, 0x1c, 0x2b, 0xc4, 0x82, 0x12, 0xda, + 0x82, 0x71, 0xc7, 0xf7, 0x83, 0xd8, 0xe1, 0x22, 0xd4, 0x44, 0xbe, 0xec, 0xa7, 0x11, 0x5e, 0x4c, + 0x6a, 0x70, 0xea, 0xca, 0x4a, 0x55, 0x83, 0x60, 0x9d, 0x30, 0xba, 0x0f, 0xd3, 0xc1, 0x7d, 0xca, + 0x1c, 0xa5, 0x96, 0x22, 0x2a, 0x4f, 0xb2, 0xb6, 0x5e, 0x1b, 0x50, 0x4f, 0x6b, 0x54, 0xd6, 0xb8, + 0x96, 0x49, 0x14, 0xa7, 0x5b, 0x41, 0xf3, 0x86, 0xb6, 0x7a, 0x2a, 0xf1, 0x9d, 0x48, 0xb4, 0xd5, + 0xba, 0x72, 0x9a, 0x05, 0xe1, 0xe0, 0x26, 0xd2, 0x6c, 0xf7, 0x4f, 0xa7, 0x82, 0x70, 0x24, 0x20, + 0xac, 0xe3, 0xa1, 0x1d, 0x98, 0x48, 0x9e, 0xac, 0xc2, 0x88, 0x85, 0x00, 0x1b, 0xbf, 0x7a, 0x75, + 0xb0, 0x8f, 0xab, 0x69, 0x35, 0xf9, 0xcd, 0x41, 0x2f, 0xc1, 0x06, 0xe5, 0xb9, 0x6f, 0x81, 0x71, + 0x6d, 0x62, 0x4f, 0xe2, 0x01, 0x30, 0xf7, 0x16, 0xcc, 0xa4, 0xa7, 0xee, 0x44, 0x1e, 0x04, 0xff, + 0xbb, 0x00, 0xd3, 0x19, 0x2f, 0x57, 0x2c, 0xf2, 0x7f, 0x8a, 0xa1, 0x26, 0x81, 0xfe, 0x4d, 0xb6, + 0x58, 0x18, 0x80, 0x2d, 0x4a, 0x1e, 0x5d, 0xcc, 0xe5, 0xd1, 0x82, 0x15, 0x0e, 0x7d, 0x14, 0x56, + 0x68, 0x9e, 0x3e, 0xc3, 0x03, 0x9d, 0x3e, 0xa7, 0xc0, 0x3e, 0x8d, 0x03, 0x6c, 0x74, 0x80, 0x03, + 0xec, 0x07, 0x0b, 0x30, 0x93, 0xb6, 0xf0, 0x3d, 0x83, 0xf7, 0x8e, 0xb7, 0x8d, 0xf7, 0x8e, 0xec, + 0x3c, 0x1a, 0x69, 0xbb, 0xe3, 0xbc, 0xb7, 0x0f, 0x9c, 0x7a, 0xfb, 0x78, 0x61, 0x20, 0x6a, 0xbd, + 0xdf, 0x41, 0xfe, 0x7e, 0x01, 0x2e, 0xa4, 0xab, 0x2c, 0x7b, 0x8e, 0xbb, 0x77, 0x06, 0x63, 0x73, + 0xdb, 0x18, 0x9b, 0x97, 0x07, 0xf9, 0x1a, 0xd6, 0xb5, 0xdc, 0x01, 0xba, 0x97, 0x1a, 0xa0, 0x85, + 0xc1, 0x49, 0xf6, 0x1e, 0xa5, 0x6f, 0x14, 0xe1, 0x52, 0x66, 0xbd, 0xe4, 0xb9, 0x60, 0xd5, 0x78, + 0x2e, 0xb8, 0x9a, 0x7a, 0x2e, 0xb0, 0x7b, 0xd7, 0x3e, 0x9d, 0xf7, 0x03, 0xe1, 0x7b, 0xcb, 0xc2, + 0x53, 0x3e, 0xe4, 0xdb, 0x81, 0xe1, 0x7b, 0xab, 0x08, 0x61, 0x93, 0xee, 0x9f, 0xa7, 0x37, 0x83, + 0x5f, 0xb7, 0xe0, 0x62, 0xe6, 0xdc, 0x9c, 0x81, 0x5e, 0x7d, 0xdd, 0xd4, 0xab, 0x3f, 0x3f, 0xf0, + 0x6a, 0xcd, 0x51, 0xb4, 0xff, 0x41, 0x31, 0xe7, 0x5b, 0x98, 0x66, 0xf2, 0x36, 0x8c, 0x3b, 0xcd, + 0x26, 0x89, 0xa2, 0xb5, 0xa0, 0xa5, 0xc2, 0x35, 0xbe, 0xcc, 0xa4, 0x8d, 0xa4, 0xf8, 0xf8, 0xb0, + 0x32, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0xc1, 0x8c, 0x30, 0x5b, 0x38, 0xd5, 0x08, 0xb3, 0x57, + 0x01, 0xf6, 0x95, 0xbe, 0x22, 0xad, 0xe6, 0xd4, 0x34, 0x19, 0x1a, 0x16, 0xfa, 0x0e, 0x76, 0x0b, + 0xe0, 0xc6, 0x40, 0x7c, 0x29, 0xbe, 0x3a, 0xe0, 0x5c, 0xe9, 0x86, 0x45, 0x3c, 0xc8, 0x83, 0x52, + 0x09, 0x2b, 0x92, 0xe8, 0xdb, 0x60, 0x26, 0xe2, 0x31, 0x84, 0x96, 0x3d, 0x27, 0x62, 0x4e, 0x5c, + 0x62, 0x15, 0xb2, 0xc8, 0x0d, 0x8d, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xf9, 0x51, 0x2c, 0xe0, + 0x11, 0x5f, 0x98, 0xcf, 0x26, 0x1f, 0x24, 0xf2, 0x0e, 0x9d, 0x4f, 0x0f, 0x3f, 0x1b, 0x78, 0xad, + 0xa6, 0xfd, 0x83, 0x43, 0xf0, 0x44, 0x0f, 0x26, 0x86, 0x16, 0x4d, 0x23, 0x80, 0x17, 0xd3, 0xfa, + 0xbf, 0xb9, 0xcc, 0xca, 0x86, 0x42, 0x30, 0xb5, 0x56, 0x0a, 0x1f, 0x79, 0xad, 0x7c, 0x9f, 0xa5, + 0x69, 0x66, 0xb9, 0xa9, 0xf0, 0x17, 0x4f, 0xc8, 0x9c, 0x4f, 0x51, 0x55, 0xbb, 0x95, 0xa1, 0xef, + 0xbc, 0x3a, 0x70, 0x77, 0x06, 0x56, 0x80, 0x9e, 0xed, 0x93, 0xd1, 0xd7, 0x2c, 0x78, 0x3a, 0xb3, + 0xbf, 0x86, 0xd1, 0xd2, 0x02, 0x94, 0x9a, 0xb4, 0x50, 0x73, 0x0c, 0x4d, 0xdc, 0xb3, 0x25, 0x00, + 0x27, 0x38, 0x86, 0x6d, 0x52, 0xa1, 0xaf, 0x6d, 
0xd2, 0xbf, 0xb6, 0xa0, 0x6b, 0x01, 0x9f, 0x01, + 0x27, 0xad, 0x99, 0x9c, 0xf4, 0xd3, 0x83, 0xcc, 0x65, 0x0e, 0x13, 0xfd, 0xad, 0x69, 0x78, 0x2c, + 0xc7, 0x13, 0x6c, 0x1f, 0x66, 0xb7, 0x9b, 0xc4, 0x74, 0xb9, 0x15, 0x1f, 0x93, 0xe9, 0x9d, 0xdc, + 0xd3, 0x3f, 0x97, 0x5f, 0x88, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x6b, 0x16, 0x9c, 0x77, 0xee, + 0x47, 0x5d, 0x69, 0x01, 0xc5, 0x9a, 0x79, 0x2d, 0x53, 0x4f, 0xdb, 0x27, 0x8d, 0x20, 0x73, 0x8b, + 0x3b, 0x9f, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0x44, 0xd8, 0xa5, 0xf2, 0x76, 0x0f, 0xa7, 0xf0, + 0x2c, 0x97, 0x3d, 0xce, 0x53, 0x25, 0x04, 0x2b, 0x3a, 0xe8, 0x2e, 0x94, 0xb6, 0xa5, 0x1f, 0xad, + 0xe0, 0xd9, 0x99, 0x87, 0x60, 0xa6, 0xb3, 0x2d, 0xf7, 0x1d, 0x51, 0x20, 0x9c, 0x90, 0x42, 0x6f, + 0x41, 0xd1, 0xdf, 0x8a, 0x7a, 0x65, 0x36, 0x4a, 0xd9, 0xf2, 0x71, 0xef, 0xfe, 0xf5, 0xd5, 0x06, + 0xa6, 0x15, 0xd1, 0x0d, 0x28, 0x86, 0x9b, 0x2d, 0xf1, 0xb4, 0x90, 0x29, 0x97, 0xe2, 0xa5, 0x6a, + 0xf6, 0x22, 0xe1, 0x94, 0xf0, 0x52, 0x15, 0x53, 0x12, 0xa8, 0x0e, 0xc3, 0xcc, 0x69, 0x4a, 0xbc, + 0x20, 0x64, 0x0a, 0xa4, 0x3d, 0x9c, 0x0f, 0x79, 0x08, 0x00, 0x86, 0x80, 0x39, 0x21, 0xf4, 0x36, + 0x8c, 0x34, 0x59, 0xf2, 0x1f, 0xa1, 0xf8, 0xc9, 0x8e, 0x0d, 0xd5, 0x95, 0x1e, 0x88, 0xbf, 0xa0, + 0xf2, 0x72, 0x2c, 0x28, 0xa0, 0x0d, 0x18, 0x69, 0x92, 0xf6, 0xce, 0x56, 0x24, 0xf4, 0x39, 0x9f, + 0xcd, 0xa4, 0xd5, 0x23, 0xd7, 0x95, 0xa0, 0xca, 0x30, 0xb0, 0xa0, 0x85, 0x3e, 0x0f, 0x85, 0xad, + 0xa6, 0xf0, 0xa4, 0xca, 0x7c, 0x43, 0x30, 0xc3, 0x32, 0x2c, 0x8d, 0x1c, 0x1d, 0x56, 0x0a, 0xab, + 0xcb, 0xb8, 0xb0, 0xd5, 0x44, 0xeb, 0x30, 0xba, 0xc5, 0x7d, 0xeb, 0x45, 0xb4, 0x94, 0xe7, 0xb2, + 0xdd, 0xfe, 0xbb, 0xdc, 0xef, 0xb9, 0x07, 0x90, 0x00, 0x60, 0x49, 0x04, 0x6d, 0x00, 0x6c, 0xa9, + 0x18, 0x01, 0x22, 0x4a, 0xfb, 0xa7, 0x07, 0x89, 0x24, 0x20, 0x94, 0x1b, 0xaa, 0x14, 0x6b, 0x74, + 0xd0, 0x57, 0xa1, 0xe4, 0xc8, 0xe4, 0x73, 0x22, 0xce, 0xca, 0xab, 0x99, 0x9b, 0xb0, 0x77, 0x5e, + 0x3e, 0xbe, 0x82, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x5d, 0x98, 0xdc, 0x8f, 0xda, 0x3b, 0x44, 0x6e, + 0x5a, 0x16, 0x76, 0x25, 0xe7, 0x90, 0xba, 0x2b, 0x10, 0xdd, 0x30, 0xee, 0x38, 0x5e, 0x17, 0x9f, + 0x61, 0xee, 0x62, 0x77, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x07, 0xfd, 0x83, 0x4e, 0xb0, 0x79, 0x10, + 0x13, 0x11, 0xcc, 0x3d, 0x73, 0xd0, 0xdf, 0xe1, 0x28, 0xdd, 0x83, 0x2e, 0x00, 0x58, 0x12, 0xa1, + 0xdb, 0xda, 0x91, 0x89, 0x1d, 0x85, 0x06, 0xe7, 0xf9, 0xdc, 0xe1, 0xe9, 0xea, 0x6f, 0x32, 0x28, + 0x8c, 0x1f, 0x26, 0xa4, 0x18, 0x1f, 0x6c, 0xef, 0x04, 0x71, 0xe0, 0xa7, 0x78, 0xf0, 0x6c, 0x3e, + 0x1f, 0xac, 0x67, 0xe0, 0x77, 0xf3, 0xc1, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x6a, 0xc1, 0x54, 0x3b, + 0x08, 0xe3, 0xfb, 0x41, 0x28, 0x57, 0x15, 0xea, 0x71, 0xb5, 0x37, 0x30, 0x45, 0x8b, 0xcc, 0xfa, + 0xdb, 0x84, 0xe0, 0x14, 0x4d, 0xf4, 0x25, 0x18, 0x8d, 0x9a, 0x8e, 0x47, 0x6a, 0xb7, 0xcb, 0xe7, + 0xf2, 0x0f, 0x98, 0x06, 0x47, 0xc9, 0x59, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, + 0x0c, 0xb3, 0x0c, 0x21, 0x2c, 0x0e, 0x7d, 0x4e, 0x3c, 0xaf, 0x2e, 0x0b, 0x69, 0xce, 0x87, 0x58, + 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x12, 0x6e, 0x10, 0x95, 0x2f, 0xe4, 0xef, 0x01, 0x21, 0x18, + 0xdf, 0x6e, 0xf4, 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0x29, 0x17, 0xa6, 0x9c, 0xf3, 0xb1, 0x7c, + 0x2e, 0x9c, 0xcf, 0x37, 0x19, 0x17, 0xa6, 0x5c, 0x93, 0x92, 0xb0, 0xbf, 0x36, 0xda, 0x2d, 0x95, + 0xb0, 0x3b, 0xd1, 0x77, 0x5b, 0x5d, 0x06, 0x03, 0x9f, 0x1b, 0x54, 0x45, 0x73, 0x8a, 0xf2, 0xe8, + 0xd7, 0x2c, 0x78, 0xac, 0x9d, 0xf9, 0x21, 0xe2, 0x88, 0x1f, 0x4c, 0xd3, 0xc3, 0x3f, 0x5d, 0xe5, + 0x8a, 0xc8, 0x86, 0xe3, 0x9c, 0x96, 0xd2, 0x32, 0x7f, 0xf1, 0x23, 0xcb, 
0xfc, 0x6b, 0x30, 0xc6, + 0xc4, 0xc8, 0x24, 0x78, 0xdc, 0x40, 0x66, 0x77, 0x4c, 0x58, 0x58, 0x16, 0x15, 0xb1, 0x22, 0x81, + 0xbe, 0xdf, 0x82, 0xa7, 0xd2, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0x20, 0x62, 0x7e, 0x1d, 0x5b, 0x15, + 0xdf, 0xff, 0x54, 0xbd, 0x17, 0xf2, 0x71, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0xcd, 0xb8, 0x0f, + 0x8e, 0x98, 0xef, 0x89, 0x03, 0xdc, 0x09, 0x5f, 0x83, 0x89, 0xbd, 0xa0, 0xe3, 0x4b, 0x9f, 0x18, + 0xe1, 0xf1, 0xcc, 0x74, 0xd7, 0x6b, 0x5a, 0x39, 0x36, 0xb0, 0x52, 0x37, 0xc9, 0xb1, 0x87, 0xbd, + 0x49, 0x9e, 0xed, 0xfd, 0xe4, 0xeb, 0x56, 0x86, 0x60, 0xcd, 0x6f, 0xac, 0x5f, 0x30, 0x6f, 0xac, + 0xcf, 0xa6, 0x6f, 0xac, 0x5d, 0x1a, 0x4a, 0xe3, 0xb2, 0x3a, 0x78, 0xa0, 0xf6, 0x41, 0xa3, 0xf4, + 0xd9, 0x1e, 0x5c, 0xee, 0x77, 0x70, 0x30, 0x13, 0xc6, 0x96, 0x7a, 0xdb, 0x4f, 0x4c, 0x18, 0x5b, + 0xb5, 0x2a, 0x66, 0x90, 0x41, 0x23, 0xeb, 0xd8, 0xff, 0xdd, 0x82, 0x62, 0x3d, 0x68, 0x9d, 0x81, + 0xc6, 0xf5, 0x8b, 0x86, 0xc6, 0xf5, 0x89, 0x9c, 0x14, 0xd6, 0xb9, 0xfa, 0xd5, 0x95, 0x94, 0x7e, + 0xf5, 0xa9, 0x3c, 0x02, 0xbd, 0xb5, 0xa9, 0x3f, 0x5e, 0x04, 0x3d, 0xe1, 0x36, 0xfa, 0xb7, 0x0f, + 0x63, 0x0b, 0x5f, 0xec, 0x95, 0x83, 0x5b, 0x50, 0x66, 0x96, 0x8f, 0xd2, 0xcd, 0xf6, 0xcf, 0x98, + 0x49, 0xfc, 0x3d, 0xe2, 0x6e, 0xef, 0xc4, 0xa4, 0x95, 0xfe, 0x9c, 0xb3, 0x33, 0x89, 0xff, 0xaf, + 0x16, 0x4c, 0xa7, 0x5a, 0x47, 0x5e, 0x96, 0xcf, 0xde, 0x43, 0x6a, 0xda, 0x66, 0xfb, 0x3a, 0xf9, + 0xcd, 0x03, 0xa8, 0xe7, 0x2c, 0xa9, 0x85, 0x62, 0x72, 0xb9, 0x7a, 0xef, 0x8a, 0xb0, 0x86, 0x81, + 0x5e, 0x87, 0xf1, 0x38, 0x68, 0x07, 0x5e, 0xb0, 0x7d, 0x70, 0x93, 0xc8, 0x58, 0x4e, 0xea, 0xd1, + 0x71, 0x23, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0xc9, 0x22, 0xa4, 0x93, 0xb4, 0x7f, 0x73, 0x4d, 0x7e, + 0x32, 0xd7, 0xe4, 0x37, 0x2c, 0x98, 0xa1, 0xad, 0x33, 0xab, 0x32, 0x79, 0x1c, 0xaa, 0x84, 0x51, + 0x56, 0x8f, 0x84, 0x51, 0xcf, 0x52, 0xde, 0xd5, 0x0a, 0x3a, 0xb1, 0xd0, 0x62, 0x69, 0xcc, 0x89, + 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, 0x27, 0x9e, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, + 0xcc, 0x27, 0x35, 0x94, 0x93, 0x4f, 0x8a, 0x85, 0x41, 0x14, 0x96, 0x4c, 0x42, 0x30, 0xd1, 0xc2, + 0x20, 0x4a, 0x13, 0xa7, 0x04, 0xc7, 0xfe, 0x99, 0x22, 0x4c, 0xd4, 0x83, 0x56, 0xf2, 0xa0, 0xf4, + 0x9a, 0xf1, 0xa0, 0x74, 0x39, 0xf5, 0xa0, 0x34, 0xa3, 0xe3, 0x7e, 0xf3, 0xf9, 0xe8, 0xe3, 0x7a, + 0x3e, 0xfa, 0x57, 0x16, 0x9b, 0xb5, 0xea, 0x7a, 0x43, 0xe4, 0x3b, 0x7e, 0x05, 0xc6, 0x19, 0x43, + 0x62, 0xae, 0x9f, 0xf2, 0x95, 0x85, 0xa5, 0x35, 0x58, 0x4f, 0x8a, 0xb1, 0x8e, 0x83, 0xae, 0xc0, + 0x58, 0x44, 0x9c, 0xb0, 0xb9, 0xa3, 0x78, 0x9c, 0x78, 0x83, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x3b, + 0x49, 0x50, 0xc4, 0x62, 0x7e, 0xe6, 0x5e, 0xbd, 0x3f, 0x7c, 0x8b, 0xe4, 0x47, 0x42, 0xb4, 0xef, + 0x01, 0xea, 0xc6, 0x1f, 0xc0, 0xbc, 0xab, 0x62, 0x86, 0x3f, 0x2b, 0x75, 0x85, 0x3e, 0xfb, 0x13, + 0x0b, 0xa6, 0xea, 0x41, 0x8b, 0x6e, 0xdd, 0x3f, 0x4f, 0xfb, 0x54, 0x0f, 0x3f, 0x3a, 0xd2, 0x23, + 0xfc, 0xe8, 0x3f, 0xb0, 0x60, 0xb4, 0x1e, 0xb4, 0xce, 0x40, 0xf7, 0xfd, 0x05, 0x53, 0xf7, 0xfd, + 0x78, 0xce, 0x92, 0xc8, 0x51, 0x77, 0xff, 0x5c, 0x11, 0x26, 0x69, 0x3f, 0x83, 0x6d, 0x39, 0x4b, + 0xc6, 0x88, 0x58, 0x03, 0x8c, 0x08, 0x15, 0x73, 0x03, 0xcf, 0x0b, 0xee, 0xa7, 0x67, 0x6c, 0x95, + 0x95, 0x62, 0x01, 0x45, 0x2f, 0xc1, 0x58, 0x3b, 0x24, 0xfb, 0x6e, 0xd0, 0x89, 0xd2, 0x5e, 0xce, + 0x75, 0x51, 0x8e, 0x15, 0x06, 0xbd, 0x19, 0x45, 0xae, 0xdf, 0x24, 0xd2, 0xee, 0x6b, 0x88, 0xd9, + 0x7d, 0xf1, 0xb8, 0xe2, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x3d, 0x28, 0xb1, 0xff, 0x8c, 0xa3, 0x9c, + 0x3c, 0xd3, 0x95, 0x48, 0xe6, 0x21, 0x08, 0xe0, 0x84, 0x16, 0xba, 0x0a, 0x10, 0x4b, 0x0b, 0xb5, + 
0x48, 0x38, 0xe1, 0x2b, 0x59, 0x5b, 0xd9, 0xae, 0x45, 0x58, 0xc3, 0x42, 0x2f, 0x42, 0x29, 0x76, + 0x5c, 0xef, 0x96, 0xeb, 0x93, 0x48, 0x58, 0xf8, 0x89, 0x5c, 0x1d, 0xa2, 0x10, 0x27, 0x70, 0x2a, + 0xeb, 0xb0, 0x10, 0x0f, 0x3c, 0x4f, 0xde, 0x18, 0xc3, 0x66, 0xb2, 0xce, 0x2d, 0x55, 0x8a, 0x35, + 0x0c, 0xfb, 0x1a, 0x5c, 0xa8, 0x07, 0xad, 0x7a, 0x10, 0xc6, 0xab, 0x41, 0x78, 0xdf, 0x09, 0x5b, + 0x72, 0xfe, 0x2a, 0x32, 0x6d, 0x04, 0xe5, 0x3d, 0xc3, 0x7c, 0x67, 0x1a, 0x09, 0x21, 0x5e, 0x65, + 0xd2, 0xce, 0x09, 0xdd, 0xb1, 0xfe, 0x7d, 0x81, 0x31, 0x8a, 0x54, 0xf2, 0x46, 0xf4, 0x15, 0x98, + 0x8a, 0xc8, 0x2d, 0xd7, 0xef, 0x3c, 0x90, 0x37, 0xd8, 0x1e, 0xbe, 0x6e, 0x8d, 0x15, 0x1d, 0x93, + 0xeb, 0xc1, 0xcc, 0x32, 0x9c, 0xa2, 0x46, 0x87, 0x30, 0xec, 0xf8, 0x8b, 0xd1, 0x9d, 0x88, 0x84, + 0x22, 0x79, 0x20, 0x1b, 0x42, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x4b, 0x86, 0xfd, 0x59, 0x0f, 0x7c, + 0x1c, 0x04, 0xb1, 0x5c, 0x64, 0x2c, 0xfd, 0x94, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x0a, 0x28, 0xea, + 0xb4, 0xdb, 0x1e, 0x7b, 0x98, 0x76, 0xbc, 0xeb, 0x61, 0xd0, 0x69, 0xf3, 0x47, 0x41, 0x91, 0xb9, + 0xa9, 0xd1, 0x05, 0xc5, 0x19, 0x35, 0x28, 0x63, 0xd8, 0x8a, 0xd8, 0x6f, 0x11, 0xe5, 0x81, 0x6b, + 0xa4, 0x1b, 0xac, 0x08, 0x4b, 0x98, 0xfd, 0x5d, 0xec, 0xc0, 0x60, 0x39, 0xdf, 0xe2, 0x4e, 0x48, + 0xd0, 0x1e, 0x4c, 0xb6, 0xd9, 0x51, 0x2e, 0xa2, 0x67, 0x8b, 0x01, 0x7c, 0x38, 0x7b, 0x3e, 0x9e, + 0x03, 0x4a, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x7f, 0x9a, 0x66, 0x7c, 0xa9, 0xc1, 0xaf, 0x73, 0xa3, + 0xc2, 0x4a, 0x5f, 0xc8, 0xae, 0x73, 0xf9, 0x59, 0x22, 0x93, 0x23, 0x44, 0x58, 0xfa, 0x63, 0x59, + 0x17, 0xbd, 0xc3, 0x5e, 0x53, 0x39, 0x33, 0xe8, 0x97, 0x3c, 0x9a, 0x63, 0x19, 0x0f, 0xa7, 0xa2, + 0x22, 0xd6, 0x88, 0xa0, 0x5b, 0x30, 0x29, 0x52, 0x84, 0x09, 0xd5, 0x4e, 0xd1, 0x50, 0x0c, 0x4c, + 0x62, 0x1d, 0x78, 0x9c, 0x2e, 0xc0, 0x66, 0x65, 0xb4, 0x0d, 0x4f, 0x69, 0xf9, 0x32, 0x33, 0x6c, + 0x4a, 0x39, 0x6f, 0x79, 0xfa, 0xe8, 0xb0, 0xf2, 0xd4, 0x46, 0x2f, 0x44, 0xdc, 0x9b, 0x0e, 0xba, + 0x0d, 0x17, 0x9c, 0x66, 0xec, 0xee, 0x93, 0x2a, 0x71, 0x5a, 0x9e, 0xeb, 0x13, 0x33, 0xec, 0xc7, + 0xc5, 0xa3, 0xc3, 0xca, 0x85, 0xc5, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, 0x7d, 0x01, 0x4a, 0x2d, 0x3f, + 0x12, 0x63, 0x30, 0x62, 0xa4, 0x82, 0x2d, 0x55, 0xd7, 0x1b, 0xea, 0xfb, 0x93, 0x3f, 0x38, 0xa9, + 0x80, 0xb6, 0x61, 0x42, 0x77, 0xed, 0x13, 0x69, 0x84, 0x5f, 0xee, 0x71, 0xeb, 0x37, 0xfc, 0xe1, + 0xb8, 0x5e, 0x53, 0x59, 0x6c, 0x1b, 0xae, 0x72, 0x06, 0x61, 0xf4, 0x36, 0x20, 0x2a, 0xcc, 0xb8, + 0x4d, 0xb2, 0xd8, 0x64, 0x41, 0xcc, 0x99, 0x36, 0x6c, 0xcc, 0x70, 0x3f, 0x42, 0x8d, 0x2e, 0x0c, + 0x9c, 0x51, 0x0b, 0xdd, 0xa0, 0x1c, 0x45, 0x2f, 0x15, 0x06, 0xf6, 0x52, 0x00, 0x2e, 0x57, 0x49, + 0x3b, 0x24, 0x4d, 0x27, 0x26, 0x2d, 0x93, 0x22, 0x4e, 0xd5, 0xa3, 0xe7, 0x8d, 0xca, 0x67, 0x04, + 0xa6, 0x59, 0x78, 0x77, 0x4e, 0x23, 0x7a, 0x77, 0xdc, 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0xf7, 0x83, + 0x70, 0x57, 0xc4, 0xea, 0x4b, 0xc2, 0xc6, 0x26, 0x20, 0xac, 0xe3, 0x51, 0x59, 0x91, 0x3d, 0x67, + 0xd6, 0xaa, 0xec, 0x75, 0x69, 0x2c, 0xd9, 0x27, 0x37, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xab, + 0x2f, 0xb3, 0x37, 0xa3, 0x14, 0x6a, 0xad, 0xbe, 0x8c, 0x25, 0x1c, 0x91, 0xee, 0x34, 0xbb, 0x53, + 0xf9, 0xaf, 0x7d, 0xdd, 0x7c, 0x79, 0xc0, 0x4c, 0xbb, 0x3e, 0xcc, 0xa8, 0x04, 0xbf, 0x3c, 0x88, + 0x61, 0x54, 0x9e, 0x66, 0x8b, 0x64, 0xf0, 0x08, 0x88, 0x4a, 0xdb, 0x59, 0x4b, 0x51, 0xc2, 0x5d, + 0xb4, 0x8d, 0x70, 0x32, 0x33, 0x7d, 0xf3, 0x51, 0x2d, 0x40, 0x29, 0xea, 0x6c, 0xb6, 0x82, 0x3d, + 0xc7, 0xf5, 0xd9, 0x13, 0x8f, 0x26, 0x88, 0x34, 0x24, 0x00, 0x27, 0x38, 0x68, 0x15, 0xc6, 0x1c, + 0x71, 0x2d, 0x15, 0x8f, 
0x32, 0x99, 0xf1, 0x25, 0xe4, 0xd5, 0x95, 0x8b, 0xd9, 0xf2, 0x1f, 0x56, + 0x75, 0xd1, 0x9b, 0x30, 0x29, 0x5c, 0x20, 0x85, 0xf5, 0xf2, 0x39, 0xd3, 0x5b, 0xa6, 0xa1, 0x03, + 0xb1, 0x89, 0x8b, 0xbe, 0x03, 0xa6, 0x28, 0x95, 0x84, 0xb1, 0x95, 0xcf, 0x0f, 0xc2, 0x11, 0xb5, + 0x3c, 0x23, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0x5a, 0xf0, 0xa4, 0xd3, 0x89, 0x03, 0xa6, 0x0e, 0x36, + 0xd7, 0xff, 0x46, 0xb0, 0x4b, 0x7c, 0xf6, 0x12, 0x33, 0xb6, 0x74, 0xf9, 0xe8, 0xb0, 0xf2, 0xe4, + 0x62, 0x0f, 0x3c, 0xdc, 0x93, 0x0a, 0xba, 0x03, 0xe3, 0x71, 0xe0, 0x09, 0xb7, 0x83, 0xa8, 0xfc, + 0x58, 0x7e, 0x38, 0xac, 0x0d, 0x85, 0xa6, 0x2b, 0x5a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x1b, 0x7c, + 0x8f, 0xb1, 0x40, 0xc1, 0x24, 0x2a, 0x3f, 0x9e, 0x3f, 0x30, 0x2a, 0x9e, 0xb0, 0xb9, 0x05, 0x45, + 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x61, 0xb6, 0x1d, 0xba, 0x01, 0x5b, 0xd8, 0x4a, 0x15, 0x5f, 0x36, + 0x53, 0x4b, 0xd4, 0xd3, 0x08, 0xb8, 0xbb, 0x0e, 0xbd, 0x88, 0xc9, 0xc2, 0xf2, 0x45, 0x9e, 0xa7, + 0x8c, 0x0b, 0xa7, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x63, 0x7c, 0x99, 0x5f, 0x99, 0xca, 0x73, 0xf9, + 0x71, 0x39, 0xf4, 0xab, 0x15, 0x17, 0x5c, 0xd4, 0x5f, 0x9c, 0x50, 0x98, 0xfb, 0x56, 0x98, 0xed, + 0x62, 0xbc, 0x27, 0xb2, 0x28, 0xff, 0xa7, 0xc3, 0x50, 0x52, 0x7a, 0x57, 0xb4, 0x60, 0xaa, 0xd3, + 0x2f, 0xa6, 0xd5, 0xe9, 0x63, 0x54, 0xfc, 0xd3, 0x35, 0xe8, 0x1b, 0x86, 0x3d, 0x54, 0x21, 0x3f, + 0xdd, 0x98, 0xae, 0x74, 0xe8, 0xeb, 0xfe, 0xa9, 0x5d, 0xa3, 0x8b, 0x03, 0xeb, 0xe5, 0x87, 0x7a, + 0xde, 0xcc, 0x07, 0xcc, 0xa0, 0x4c, 0x6f, 0x9a, 0xed, 0xa0, 0x55, 0xab, 0xa7, 0x53, 0x8a, 0xd6, + 0x69, 0x21, 0xe6, 0x30, 0x76, 0x57, 0xa0, 0x52, 0x02, 0xbb, 0x2b, 0x8c, 0x3e, 0xe4, 0x5d, 0x41, + 0x12, 0xc0, 0x09, 0x2d, 0xe4, 0xc1, 0x6c, 0xd3, 0xcc, 0x06, 0xab, 0x5c, 0x3e, 0x9f, 0xe9, 0x9b, + 0x97, 0xb5, 0xa3, 0xa5, 0x89, 0x5b, 0x4e, 0x53, 0xc1, 0xdd, 0x84, 0xd1, 0x9b, 0x30, 0xf6, 0x41, + 0x10, 0xb1, 0x55, 0x2c, 0x8e, 0x4a, 0xe9, 0x64, 0x37, 0xf6, 0xce, 0xed, 0x06, 0x2b, 0x3f, 0x3e, + 0xac, 0x8c, 0xd7, 0x83, 0x96, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x00, 0x2e, 0x18, 0x0c, 0x46, 0x75, + 0x17, 0x06, 0xef, 0xee, 0x53, 0xa2, 0xb9, 0x0b, 0xb5, 0x2c, 0x4a, 0x38, 0xbb, 0x01, 0xfb, 0x17, + 0xb9, 0x76, 0x59, 0xe8, 0xa0, 0x48, 0xd4, 0xf1, 0xce, 0x22, 0x17, 0xd4, 0x8a, 0xa1, 0x1e, 0x7b, + 0xe8, 0x17, 0x8c, 0x5f, 0xb5, 0xd8, 0x0b, 0xc6, 0x06, 0xd9, 0x6b, 0x7b, 0x4e, 0x7c, 0x16, 0x7e, + 0x04, 0xef, 0xc0, 0x58, 0x2c, 0x5a, 0xeb, 0x95, 0xbe, 0x4a, 0xeb, 0x14, 0x7b, 0xc5, 0x51, 0xe7, + 0xab, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0xcf, 0xf9, 0x0c, 0x48, 0xc8, 0x19, 0xa8, 0x2a, 0xaa, 0xa6, + 0xaa, 0xa2, 0xd2, 0xe7, 0x0b, 0x72, 0x54, 0x16, 0xff, 0xcc, 0xec, 0x37, 0xbb, 0xca, 0x7c, 0xd2, + 0x9f, 0xce, 0xec, 0x1f, 0xb6, 0xe0, 0x7c, 0x96, 0x35, 0x08, 0x95, 0x89, 0xf8, 0x45, 0x4a, 0x3d, + 0x25, 0xaa, 0x11, 0xbc, 0x2b, 0xca, 0xb1, 0xc2, 0x18, 0x38, 0x59, 0xc7, 0xc9, 0x22, 0xca, 0xdd, + 0x06, 0x33, 0x71, 0x30, 0x7a, 0x8b, 0x3b, 0x06, 0x59, 0x2a, 0xb3, 0xef, 0xc9, 0x9c, 0x82, 0xec, + 0x9f, 0x2a, 0xc0, 0x79, 0xfe, 0x16, 0xb0, 0xb8, 0x1f, 0xb8, 0xad, 0x7a, 0xd0, 0x12, 0x6e, 0x52, + 0xef, 0xc2, 0x44, 0x5b, 0xbb, 0xfd, 0xf6, 0x8a, 0x69, 0xa5, 0xdf, 0x92, 0x93, 0x5b, 0x88, 0x5e, + 0x8a, 0x0d, 0x5a, 0xa8, 0x05, 0x13, 0x64, 0xdf, 0x6d, 0x2a, 0x85, 0x72, 0xe1, 0xc4, 0x2c, 0x5d, + 0xb5, 0xb2, 0xa2, 0xd1, 0xc1, 0x06, 0xd5, 0x47, 0x90, 0xe8, 0xcd, 0xfe, 0x11, 0x0b, 0x1e, 0xcf, + 0x89, 0x80, 0x45, 0x9b, 0xbb, 0xcf, 0x5e, 0x5d, 0x44, 0xce, 0x28, 0xd5, 0x1c, 0x7f, 0x8b, 0xc1, + 0x02, 0x8a, 0xbe, 0x04, 0xc0, 0xdf, 0x52, 0xa8, 0x50, 0x2e, 0x3e, 0x7d, 0xb0, 0xc8, 0x30, 0x5a, + 0xf8, 0x10, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x9f, 
0x28, 0xc2, 0x30, 0xd3, 0xdd, 0xa3, 0x55, 0x18, + 0xdd, 0xe1, 0xf1, 0xb6, 0x07, 0x09, 0xed, 0x9d, 0xdc, 0x6e, 0x78, 0x01, 0x96, 0x95, 0xd1, 0x1a, + 0x9c, 0x13, 0xae, 0x78, 0x55, 0xe2, 0x39, 0x07, 0xf2, 0x92, 0xcc, 0xf3, 0x2c, 0xa9, 0xcc, 0x62, + 0xb5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0xd5, 0x15, 0x65, 0x93, 0x47, 0x2a, 0x57, 0x22, 0x75, + 0x9f, 0x48, 0x9b, 0x6f, 0xc2, 0x64, 0xbb, 0x4b, 0x1d, 0x30, 0x9c, 0x88, 0xfb, 0xa6, 0x0a, 0xc0, + 0xc4, 0x65, 0x66, 0x20, 0x1d, 0x66, 0xf4, 0xb2, 0xb1, 0x13, 0x92, 0x68, 0x27, 0xf0, 0x5a, 0x22, + 0xf5, 0x79, 0x62, 0x06, 0x92, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x7a, 0x9d, 0x90, + 0x24, 0x54, 0x46, 0x4c, 0x2a, 0xab, 0x29, 0x38, 0xee, 0xaa, 0x41, 0xd7, 0xd1, 0x05, 0x91, 0x37, + 0x5b, 0x06, 0x68, 0x50, 0xb6, 0x3d, 0xa3, 0xd2, 0x51, 0xa3, 0x47, 0xd0, 0x20, 0x61, 0x5b, 0xa1, + 0x32, 0x6f, 0x6b, 0x59, 0x59, 0x85, 0x8b, 0x86, 0xa4, 0xf2, 0x30, 0xd9, 0x9b, 0x7f, 0xd7, 0x82, + 0x73, 0x19, 0x36, 0x84, 0x9c, 0x55, 0x6d, 0xbb, 0x51, 0xac, 0xd2, 0xfb, 0x68, 0xac, 0x8a, 0x97, + 0x63, 0x85, 0x41, 0xf7, 0x03, 0x67, 0x86, 0x69, 0x06, 0x28, 0x6c, 0x74, 0x04, 0xf4, 0x64, 0x0c, + 0x10, 0x5d, 0x86, 0xa1, 0x4e, 0x44, 0x42, 0x99, 0xf6, 0x58, 0xf2, 0x6f, 0xa6, 0x60, 0x64, 0x10, + 0x2a, 0x51, 0x6e, 0x2b, 0xdd, 0x9e, 0x26, 0x51, 0x72, 0xed, 0x1e, 0x87, 0xd9, 0x3f, 0x50, 0x84, + 0x8b, 0xb9, 0x96, 0xc1, 0xb4, 0x4b, 0x7b, 0x81, 0xef, 0xc6, 0x81, 0x7a, 0x17, 0xe2, 0xd1, 0x6d, + 0x48, 0x7b, 0x67, 0x4d, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x95, 0x59, 0xf1, 0xd3, 0x09, 0x8c, 0x96, + 0xaa, 0x46, 0x62, 0xfc, 0x41, 0x33, 0x91, 0x3d, 0x03, 0x43, 0xed, 0x20, 0xf0, 0xd2, 0xcc, 0x88, + 0x76, 0x37, 0x08, 0x3c, 0xcc, 0x80, 0xe8, 0x33, 0x62, 0x1c, 0x52, 0x0f, 0x21, 0xd8, 0x69, 0x05, + 0x91, 0x36, 0x18, 0xcf, 0xc3, 0xe8, 0x2e, 0x39, 0x08, 0x5d, 0x7f, 0x3b, 0xfd, 0x40, 0x76, 0x93, + 0x17, 0x63, 0x09, 0x37, 0xf3, 0x77, 0x8c, 0x9e, 0x46, 0xfe, 0x0e, 0x7d, 0x66, 0xc7, 0xfa, 0x1e, + 0x6d, 0xdf, 0x57, 0x84, 0x69, 0xbc, 0x54, 0xfd, 0xe6, 0x44, 0xdc, 0xe9, 0x9e, 0x88, 0xd3, 0xce, + 0xea, 0xd6, 0x7f, 0x36, 0x7e, 0xce, 0x82, 0x69, 0x16, 0xe3, 0x5a, 0x44, 0x67, 0x71, 0x03, 0xff, + 0x0c, 0x44, 0xb7, 0x67, 0x60, 0x38, 0xa4, 0x8d, 0xa6, 0x53, 0x35, 0xb1, 0x9e, 0x60, 0x0e, 0x43, + 0x4f, 0xc2, 0x10, 0xeb, 0x02, 0x9d, 0xbc, 0x09, 0x9e, 0xe5, 0xa2, 0xea, 0xc4, 0x0e, 0x66, 0xa5, + 0xcc, 0x4d, 0x16, 0x93, 0xb6, 0xe7, 0xf2, 0x4e, 0x27, 0x0a, 0xf5, 0x4f, 0x86, 0x9b, 0x6c, 0x66, + 0xd7, 0x3e, 0x9a, 0x9b, 0x6c, 0x36, 0xc9, 0xde, 0xd7, 0xa2, 0xff, 0x51, 0x80, 0x4b, 0x99, 0xf5, + 0x06, 0x76, 0x93, 0xed, 0x5d, 0xfb, 0x74, 0xec, 0x1c, 0xb2, 0xcd, 0x0f, 0x8a, 0x67, 0x68, 0x7e, + 0x30, 0x34, 0xa8, 0xe4, 0x38, 0x3c, 0x80, 0xf7, 0x6a, 0xe6, 0x90, 0x7d, 0x42, 0xbc, 0x57, 0x33, + 0xfb, 0x96, 0x73, 0xad, 0xfb, 0xd3, 0x42, 0xce, 0xb7, 0xb0, 0x0b, 0xde, 0x15, 0xca, 0x67, 0x18, + 0x30, 0x12, 0x92, 0xf0, 0x04, 0xe7, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x72, 0x35, 0x3f, 0x50, 0xde, + 0xb5, 0x37, 0x4f, 0xb4, 0x65, 0xe6, 0xcd, 0xf7, 0x0f, 0x3d, 0x94, 0x4c, 0xda, 0x27, 0x74, 0x4d, + 0xbb, 0x94, 0x17, 0x07, 0xbf, 0x94, 0x4f, 0x64, 0x5f, 0xc8, 0xd1, 0x22, 0x4c, 0xef, 0xb9, 0x3e, + 0x65, 0x9b, 0x07, 0xa6, 0x28, 0xaa, 0xc2, 0x22, 0xac, 0x99, 0x60, 0x9c, 0xc6, 0x9f, 0x7b, 0x13, + 0x26, 0x1f, 0x5e, 0x8b, 0xf8, 0x8d, 0x22, 0x3c, 0xd1, 0x63, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, + 0xc6, 0xeb, 0xbb, 0xe6, 0xa1, 0x0e, 0xe7, 0xb7, 0x3a, 0x9e, 0x77, 0xc0, 0x2c, 0xfc, 0x48, 0x4b, + 0x62, 0x08, 0x59, 0x51, 0x05, 0xb0, 0x5f, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xd1, 0xdb, 0x80, 0x02, + 0x91, 0xb2, 0x36, 0x09, 0x90, 0xc3, 0x06, 0xbe, 0x98, 0x6c, 0xc6, 0xdb, 
0x5d, 0x18, 0x38, 0xa3, + 0x16, 0x15, 0xfa, 0xe9, 0xa9, 0x74, 0xa0, 0xba, 0x95, 0x12, 0xfa, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, + 0xba, 0x0e, 0xb3, 0xce, 0xbe, 0xe3, 0xf2, 0x80, 0x89, 0x92, 0x00, 0x97, 0xfa, 0x95, 0xee, 0x6e, + 0x31, 0x8d, 0x80, 0xbb, 0xeb, 0xa4, 0x1c, 0x51, 0x47, 0xf2, 0x1d, 0x51, 0x7b, 0xf3, 0xc5, 0x7e, + 0xaa, 0x58, 0xfb, 0x3f, 0x5b, 0xf4, 0xf8, 0xca, 0x48, 0x3b, 0x4f, 0xc7, 0x41, 0xa9, 0x14, 0x35, + 0x9f, 0x50, 0x35, 0x0e, 0xcb, 0x3a, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xa3, 0x82, 0x21, + 0xba, 0x0b, 0xa7, 0x6f, 0x85, 0x81, 0xbe, 0x0c, 0xa3, 0x2d, 0x77, 0xdf, 0x8d, 0x82, 0x50, 0x6c, + 0x96, 0x13, 0x1a, 0x93, 0x27, 0x7c, 0xb0, 0xca, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xbe, 0x02, 0x4c, + 0xca, 0x16, 0xdf, 0xe9, 0x04, 0xb1, 0x73, 0x06, 0xc7, 0xf2, 0x75, 0xe3, 0x58, 0xfe, 0x4c, 0x2f, + 0xcf, 0x77, 0xd6, 0xa5, 0xdc, 0xe3, 0xf8, 0x76, 0xea, 0x38, 0x7e, 0xae, 0x3f, 0xa9, 0xde, 0xc7, + 0xf0, 0xbf, 0xb0, 0x60, 0xd6, 0xc0, 0x3f, 0x83, 0xd3, 0x60, 0xd5, 0x3c, 0x0d, 0x9e, 0xee, 0xfb, + 0x0d, 0x39, 0xa7, 0xc0, 0xd7, 0x0b, 0xa9, 0xbe, 0x33, 0xee, 0xff, 0x01, 0x0c, 0xed, 0x38, 0x61, + 0xab, 0x57, 0xd8, 0xdf, 0xae, 0x4a, 0xf3, 0x37, 0x9c, 0xb0, 0xc5, 0x79, 0xf8, 0x4b, 0x2a, 0xf7, + 0xa8, 0x13, 0xb6, 0xfa, 0xfa, 0xe5, 0xb0, 0xa6, 0xd0, 0x35, 0x18, 0x89, 0x9a, 0x41, 0x5b, 0xd9, + 0xe4, 0x5d, 0xe6, 0x79, 0x49, 0x69, 0xc9, 0xf1, 0x61, 0x05, 0x99, 0xcd, 0xd1, 0x62, 0x2c, 0xf0, + 0xe7, 0xb6, 0xa1, 0xa4, 0x9a, 0x7e, 0xa4, 0x1e, 0x15, 0xbf, 0x55, 0x84, 0x73, 0x19, 0xeb, 0x02, + 0x45, 0xc6, 0x68, 0xbd, 0x32, 0xe0, 0x72, 0xfa, 0x88, 0xe3, 0x15, 0xb1, 0x1b, 0x4b, 0x4b, 0xcc, + 0xff, 0xc0, 0x8d, 0xde, 0x89, 0x48, 0xba, 0x51, 0x5a, 0xd4, 0xbf, 0x51, 0xda, 0xd8, 0x99, 0x0d, + 0x35, 0x6d, 0x48, 0xf5, 0xf4, 0x91, 0xce, 0xe9, 0x1f, 0x15, 0xe1, 0x7c, 0x56, 0xc0, 0x0c, 0xf4, + 0x9d, 0xa9, 0x24, 0x42, 0xaf, 0x0d, 0x1a, 0x6a, 0x83, 0x67, 0x16, 0x12, 0x11, 0xc6, 0xe6, 0xcd, + 0xb4, 0x42, 0x7d, 0x87, 0x59, 0xb4, 0xc9, 0x1c, 0xe5, 0x42, 0x9e, 0xfc, 0x49, 0x6e, 0xf1, 0xcf, + 0x0d, 0xdc, 0x01, 0x91, 0x35, 0x2a, 0x4a, 0x39, 0xca, 0xc9, 0xe2, 0xfe, 0x8e, 0x72, 0xb2, 0xe5, + 0x39, 0x17, 0xc6, 0xb5, 0xaf, 0x79, 0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x14, 0xad, 0xdf, 0x8f, 0x74, + 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x25, 0x9c, 0x52, 0x49, 0x59, 0xb9, 0x2a, 0xa9, 0xcb, 0x30, 0x14, + 0x06, 0x1e, 0x49, 0xe7, 0x95, 0xc1, 0x81, 0x47, 0x30, 0x83, 0x50, 0x8c, 0x38, 0x51, 0x48, 0x4c, + 0xe8, 0x97, 0x2d, 0x71, 0x8d, 0x7a, 0x06, 0x86, 0x3d, 0xb2, 0x4f, 0xa4, 0x36, 0x42, 0xf1, 0xe4, + 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x73, 0x43, 0xf0, 0x54, 0x4f, 0x57, 0x53, 0x7a, 0x65, 0xd9, + 0x76, 0x62, 0x72, 0xdf, 0x39, 0x48, 0x47, 0xbd, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, 0xbb, 0x5d, + 0x1e, 0x38, 0x33, 0xa5, 0xc0, 0x13, 0xf1, 0x32, 0x05, 0xd4, 0x54, 0x1c, 0x15, 0x4f, 0x43, 0x71, + 0x74, 0x15, 0x20, 0x8a, 0xbc, 0x15, 0x9f, 0x4a, 0x60, 0x2d, 0x61, 0x10, 0x9c, 0x04, 0x58, 0x6d, + 0xdc, 0x12, 0x10, 0xac, 0x61, 0xa1, 0x2a, 0xcc, 0xb4, 0xc3, 0x20, 0xe6, 0xfa, 0xd0, 0x2a, 0x37, + 0x45, 0x19, 0x36, 0xbd, 0xfc, 0xea, 0x29, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x87, 0x71, 0xe1, 0xf9, + 0x57, 0x0f, 0x02, 0x4f, 0xa8, 0x6a, 0x94, 0x61, 0x43, 0x23, 0x01, 0x61, 0x1d, 0x4f, 0xab, 0xc6, + 0x94, 0xac, 0xa3, 0x99, 0xd5, 0xb8, 0xa2, 0x55, 0xc3, 0x4b, 0x05, 0xcf, 0x19, 0x1b, 0x28, 0x78, + 0x4e, 0xa2, 0xbc, 0x2a, 0x0d, 0xfc, 0xae, 0x04, 0x7d, 0xd5, 0x3d, 0x3f, 0x3d, 0x04, 0xe7, 0xc4, + 0xc2, 0x79, 0xd4, 0xcb, 0xe5, 0x4e, 0xf7, 0x72, 0x39, 0x0d, 0xf5, 0xd6, 0x37, 0xd7, 0xcc, 0x59, + 0xaf, 0x99, 0x5f, 0x2c, 0xc2, 0x08, 0x9f, 0x8a, 0x33, 0x90, 0xe1, 0x57, 0x85, 0xd2, 0xaf, 0x47, + 
0xd8, 0x18, 0xde, 0x97, 0xf9, 0xaa, 0x13, 0x3b, 0xfc, 0xfc, 0x52, 0x6c, 0x34, 0x51, 0x0f, 0xa2, + 0x79, 0x83, 0xd1, 0xce, 0xa5, 0xb4, 0x5a, 0xc0, 0x69, 0x68, 0x6c, 0xf7, 0x2b, 0x00, 0x11, 0x4b, + 0x9c, 0x4f, 0x69, 0x88, 0x00, 0x44, 0x2f, 0xf4, 0x68, 0xbd, 0xa1, 0x90, 0x79, 0x1f, 0x92, 0x25, + 0xa8, 0x00, 0x58, 0xa3, 0x38, 0xf7, 0x06, 0x94, 0x14, 0x72, 0x3f, 0x15, 0xc0, 0x84, 0x7e, 0xea, + 0x7d, 0x11, 0xa6, 0x53, 0x6d, 0x9d, 0x48, 0x83, 0xf0, 0xf3, 0x16, 0x4c, 0xf3, 0x2e, 0xaf, 0xf8, + 0xfb, 0x62, 0xb3, 0x7f, 0x08, 0xe7, 0xbd, 0x8c, 0x4d, 0x27, 0x66, 0x74, 0xf0, 0x4d, 0xaa, 0x34, + 0x06, 0x59, 0x50, 0x9c, 0xd9, 0x06, 0xba, 0x02, 0x63, 0xdc, 0xd1, 0xc5, 0xf1, 0x84, 0x73, 0xc2, + 0x04, 0x4f, 0x44, 0xc1, 0xcb, 0xb0, 0x82, 0xda, 0xbf, 0x6d, 0xc1, 0x2c, 0xef, 0xf9, 0x4d, 0x72, + 0xa0, 0x6e, 0xc7, 0x1f, 0x67, 0xdf, 0x45, 0x9e, 0x8d, 0x42, 0x4e, 0x9e, 0x0d, 0xfd, 0xd3, 0x8a, + 0x3d, 0x3f, 0xed, 0xa7, 0x2c, 0x10, 0x2b, 0xf0, 0x0c, 0xee, 0x81, 0xdf, 0x6a, 0xde, 0x03, 0xe7, + 0xf2, 0x17, 0x75, 0xce, 0x05, 0xf0, 0x4f, 0x2c, 0x98, 0xe1, 0x08, 0xc9, 0x43, 0xe4, 0xc7, 0x3a, + 0x0f, 0x83, 0x24, 0x7f, 0x53, 0xd9, 0xb6, 0xb3, 0x3f, 0xca, 0x98, 0xac, 0xa1, 0x9e, 0x93, 0xd5, + 0x92, 0x1b, 0xe8, 0x04, 0x49, 0x0d, 0x4f, 0x1c, 0x1a, 0xd6, 0xfe, 0x43, 0x0b, 0x10, 0x6f, 0xc6, + 0x38, 0x97, 0xe9, 0x69, 0xc7, 0x4a, 0x35, 0x4d, 0x50, 0xc2, 0x6a, 0x14, 0x04, 0x6b, 0x58, 0xa7, + 0x32, 0x3c, 0xa9, 0xd7, 0xe4, 0x62, 0xff, 0xd7, 0xe4, 0x13, 0x8c, 0xe8, 0x5f, 0x1f, 0x82, 0xb4, + 0x25, 0x34, 0xba, 0x0b, 0x13, 0x4d, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0xea, 0x65, + 0x86, 0xb2, 0xac, 0xe1, 0x89, 0x77, 0x42, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x3c, 0x40, 0x3b, 0x74, + 0xf7, 0x5d, 0x8f, 0x6c, 0xb3, 0xab, 0x30, 0x73, 0x87, 0xe2, 0xb6, 0x15, 0xb2, 0x14, 0x6b, 0x18, + 0x19, 0xee, 0x33, 0xc5, 0x47, 0xe7, 0x3e, 0x33, 0x74, 0x42, 0xf7, 0x99, 0xe1, 0x81, 0xdc, 0x67, + 0x30, 0x3c, 0x26, 0xcf, 0x6e, 0xfa, 0x7f, 0xd5, 0xf5, 0x88, 0x10, 0xd8, 0xb8, 0x93, 0xd4, 0xdc, + 0xd1, 0x61, 0xe5, 0x31, 0x9c, 0x89, 0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x41, 0xd9, 0xf1, 0xbc, 0xe0, + 0xbe, 0x1a, 0xb5, 0x95, 0xa8, 0xe9, 0x78, 0x49, 0xa4, 0xf4, 0xb1, 0xa5, 0x27, 0x8f, 0x0e, 0x2b, + 0xe5, 0xc5, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xde, 0x85, 0x73, 0x0d, 0x12, 0xca, 0x3c, 0xa9, 0x6a, + 0x8b, 0x6d, 0x40, 0x29, 0x4c, 0x31, 0x95, 0x81, 0x62, 0x95, 0x68, 0x51, 0x2c, 0x25, 0x13, 0x49, + 0x08, 0xd9, 0x7f, 0x6c, 0xc1, 0xa8, 0xb0, 0xae, 0x3e, 0x03, 0x59, 0x66, 0xd1, 0xd0, 0x47, 0x56, + 0xb2, 0x19, 0x2f, 0xeb, 0x4c, 0xae, 0x26, 0xb2, 0x96, 0xd2, 0x44, 0x3e, 0xdd, 0x8b, 0x48, 0x6f, + 0x1d, 0xe4, 0x0f, 0x15, 0x61, 0xca, 0xb4, 0x2c, 0x3f, 0x83, 0x21, 0x58, 0x87, 0xd1, 0x48, 0xb8, + 0x31, 0x14, 0xf2, 0xed, 0x57, 0xd3, 0x93, 0x98, 0x58, 0xb9, 0x08, 0xc7, 0x05, 0x49, 0x24, 0xd3, + 0x3f, 0xa2, 0xf8, 0x08, 0xfd, 0x23, 0xfa, 0x19, 0xf7, 0x0f, 0x9d, 0x86, 0x71, 0xbf, 0xfd, 0x4b, + 0x8c, 0xf9, 0xeb, 0xe5, 0x67, 0x20, 0x17, 0x5c, 0x37, 0x8f, 0x09, 0xbb, 0xc7, 0xca, 0x12, 0x9d, + 0xca, 0x91, 0x0f, 0xfe, 0xb1, 0x05, 0xe3, 0x02, 0xf1, 0x0c, 0xba, 0xfd, 0x6d, 0x66, 0xb7, 0x9f, + 0xe8, 0xd1, 0xed, 0x9c, 0xfe, 0xfe, 0xdd, 0x82, 0xea, 0x6f, 0x3d, 0x08, 0xe3, 0x81, 0x32, 0x67, + 0x8c, 0xd1, 0xdb, 0x60, 0xd0, 0x0c, 0x3c, 0x71, 0x98, 0x3f, 0x99, 0xf8, 0xc9, 0xf2, 0xf2, 0x63, + 0xed, 0x37, 0x56, 0xd8, 0xcc, 0x8d, 0x33, 0x08, 0x63, 0x71, 0x80, 0x26, 0x6e, 0x9c, 0x41, 0x18, + 0x63, 0x06, 0x41, 0x2d, 0x80, 0xd8, 0x09, 0xb7, 0x49, 0x4c, 0xcb, 0x84, 0xcb, 0x7d, 0xfe, 0x2e, + 0xec, 0xc4, 0xae, 0x37, 0xef, 0xfa, 0x71, 0x14, 0x87, 0xf3, 0x35, 0x3f, 0xbe, 0x1d, 0xf2, 0xbb, + 0x81, 0xe6, 0xf8, 0xaa, 
0x68, 0x61, 0x8d, 0xae, 0xf4, 0xbc, 0x62, 0x6d, 0x0c, 0x9b, 0x0f, 0x85, + 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0d, 0xc6, 0x93, 0xd9, 0x00, 0x9d, 0xcc, 0x27, 0xf5, 0x37, + 0xc6, 0xd4, 0xd0, 0xb2, 0x57, 0x82, 0xaa, 0xee, 0xf9, 0xda, 0x9b, 0x05, 0xd2, 0x86, 0x75, 0xb7, + 0x80, 0xc4, 0x3d, 0x16, 0x7d, 0x7b, 0xd7, 0xfb, 0xf1, 0xcb, 0x7d, 0x78, 0xe9, 0x09, 0x5e, 0x8c, + 0x59, 0xf8, 0x55, 0x16, 0xa6, 0xb2, 0x56, 0x4f, 0xe7, 0x36, 0x59, 0x96, 0x00, 0x9c, 0xe0, 0xa0, + 0x05, 0x71, 0xb3, 0xe4, 0xfa, 0xb9, 0x27, 0x52, 0x37, 0x4b, 0xf9, 0xf9, 0xda, 0xd5, 0xf2, 0x15, + 0x18, 0x57, 0xf9, 0xe2, 0xea, 0x3c, 0xed, 0x96, 0x08, 0x40, 0xb0, 0x92, 0x14, 0x63, 0x1d, 0x07, + 0x6d, 0xc0, 0x74, 0xc4, 0x93, 0xd9, 0x49, 0x67, 0x28, 0xa1, 0x37, 0x78, 0x41, 0xbe, 0x3b, 0x37, + 0x4c, 0xf0, 0x31, 0x2b, 0xe2, 0x9b, 0x55, 0xba, 0x4f, 0xa5, 0x49, 0xa0, 0xb7, 0x60, 0xca, 0xd3, + 0x93, 0x7a, 0xd7, 0x85, 0x5a, 0x41, 0x99, 0x65, 0x1a, 0x29, 0xbf, 0xeb, 0x38, 0x85, 0x4d, 0x85, + 0x00, 0xbd, 0x44, 0x44, 0x2f, 0x73, 0xfc, 0x6d, 0x12, 0x89, 0x6c, 0x57, 0x4c, 0x08, 0xb8, 0x95, + 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x6b, 0x30, 0x21, 0x3f, 0x5f, 0x73, 0x0e, 0x4c, 0x8c, 0x7f, 0x35, + 0x18, 0x36, 0x30, 0xd1, 0x7d, 0xb8, 0x20, 0xff, 0x6f, 0x84, 0xce, 0xd6, 0x96, 0xdb, 0x14, 0xbe, + 0x99, 0xe3, 0x8c, 0xc4, 0xa2, 0xf4, 0x84, 0x58, 0xc9, 0x42, 0x3a, 0x3e, 0xac, 0x5c, 0x16, 0xa3, + 0x96, 0x09, 0x67, 0x93, 0x98, 0x4d, 0x1f, 0xad, 0xc1, 0xb9, 0x1d, 0xe2, 0x78, 0xf1, 0xce, 0xf2, + 0x0e, 0x69, 0xee, 0xca, 0x4d, 0xc4, 0x5c, 0x0e, 0x35, 0x93, 0xd9, 0x1b, 0xdd, 0x28, 0x38, 0xab, + 0x1e, 0x7a, 0x0f, 0xca, 0xed, 0xce, 0xa6, 0xe7, 0x46, 0x3b, 0xeb, 0x41, 0xcc, 0x9e, 0xba, 0x55, + 0xba, 0x35, 0xe1, 0x9b, 0xa8, 0xdc, 0x2d, 0xeb, 0x39, 0x78, 0x38, 0x97, 0x02, 0xfa, 0x10, 0x2e, + 0xa4, 0x16, 0x83, 0xf0, 0x94, 0x9a, 0xca, 0x8f, 0x05, 0xd9, 0xc8, 0xaa, 0xc0, 0x3d, 0x66, 0x33, + 0x41, 0x38, 0xbb, 0x89, 0x8f, 0x66, 0x00, 0xf1, 0x01, 0xad, 0xac, 0x49, 0x37, 0xe8, 0xab, 0x30, + 0xa1, 0xaf, 0x22, 0x71, 0xc0, 0x3c, 0xdb, 0x2f, 0x81, 0xbd, 0x90, 0x8d, 0xd4, 0x8a, 0xd2, 0x61, + 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0xfe, 0x3e, 0x74, 0x0b, 0xc6, 0x9a, 0x9e, 0x4b, 0xfc, 0xb8, 0x56, + 0xef, 0xe5, 0x53, 0xbf, 0x2c, 0x70, 0xc4, 0x80, 0x89, 0xe0, 0x79, 0xbc, 0x0c, 0x2b, 0x0a, 0xf6, + 0xaf, 0x14, 0xa0, 0xd2, 0x27, 0x12, 0x63, 0x4a, 0x07, 0x68, 0x0d, 0xa4, 0x03, 0x5c, 0x94, 0xc9, + 0xe3, 0xd6, 0x53, 0xf7, 0xcf, 0x54, 0x62, 0xb8, 0xe4, 0x16, 0x9a, 0xc6, 0x1f, 0xd8, 0x6e, 0x52, + 0x57, 0x23, 0x0e, 0xf5, 0xb5, 0xe8, 0x35, 0x9e, 0x0f, 0x86, 0x07, 0x97, 0xe8, 0x73, 0x55, 0xc1, + 0xf6, 0x2f, 0x15, 0xe0, 0x82, 0x1a, 0xc2, 0x3f, 0xbf, 0x03, 0x77, 0xa7, 0x7b, 0xe0, 0x4e, 0x41, + 0x91, 0x6e, 0xdf, 0x86, 0x91, 0xc6, 0x41, 0xd4, 0x8c, 0xbd, 0x01, 0x04, 0xa0, 0x67, 0xcc, 0xd8, + 0x32, 0xea, 0x98, 0x36, 0xe2, 0xcb, 0xfc, 0x15, 0x0b, 0xa6, 0x37, 0x96, 0xeb, 0x8d, 0xa0, 0xb9, + 0x4b, 0xe2, 0x45, 0xae, 0x26, 0xc2, 0x42, 0xfe, 0xb1, 0x1e, 0x52, 0xae, 0xc9, 0x92, 0x98, 0x2e, + 0xc3, 0xd0, 0x4e, 0x10, 0xc5, 0xe9, 0x57, 0xb6, 0x1b, 0x41, 0x14, 0x63, 0x06, 0xb1, 0x7f, 0xc7, + 0x82, 0x61, 0x96, 0xf2, 0xb4, 0x5f, 0x6a, 0xdc, 0x41, 0xbe, 0x0b, 0xbd, 0x0e, 0x23, 0x64, 0x6b, + 0x8b, 0x34, 0x63, 0x31, 0xab, 0xd2, 0xbb, 0x6e, 0x64, 0x85, 0x95, 0xd2, 0x43, 0x9f, 0x35, 0xc6, + 0xff, 0x62, 0x81, 0x8c, 0xee, 0x41, 0x29, 0x76, 0xf7, 0xc8, 0x62, 0xab, 0x25, 0xde, 0x29, 0x1e, + 0xc2, 0x99, 0x71, 0x43, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x07, 0x0a, 0x00, 0x89, 0x43, 0x6f, 0xbf, + 0x4f, 0x5c, 0xea, 0xca, 0xfe, 0xfb, 0x6c, 0x46, 0xf6, 0x5f, 0x94, 0x10, 0xcc, 0xc8, 0xfd, 0xab, + 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x0d, 0x9d, 0x64, 
0x98, 0x96, 0x61, 0x36, 0x71, 0x48, 0x36, 0xa3, + 0x33, 0xb0, 0x78, 0xec, 0x1b, 0x69, 0x20, 0xee, 0xc6, 0xb7, 0xbf, 0xd7, 0x02, 0xe1, 0x6e, 0x30, + 0xc0, 0x62, 0x7e, 0x57, 0x26, 0xea, 0x34, 0x02, 0xba, 0x5e, 0xce, 0xf7, 0xbf, 0x10, 0x61, 0x5c, + 0xd5, 0xe1, 0x61, 0x04, 0x6f, 0x35, 0x68, 0xd9, 0x2d, 0x10, 0xd0, 0x2a, 0x61, 0x4a, 0x86, 0xfe, + 0xbd, 0xb9, 0x0a, 0xd0, 0x62, 0xb8, 0x5a, 0xe2, 0x3f, 0xc5, 0xaa, 0xaa, 0x0a, 0x82, 0x35, 0x2c, + 0xfb, 0x6f, 0x16, 0x60, 0x5c, 0x06, 0x10, 0xa5, 0xf7, 0xf8, 0xfe, 0xad, 0x9c, 0x28, 0x67, 0x00, + 0xcb, 0x94, 0x49, 0x09, 0xab, 0xd0, 0xf2, 0x7a, 0xa6, 0x4c, 0x09, 0xc0, 0x09, 0x0e, 0x7a, 0x1e, + 0x46, 0xa3, 0xce, 0x26, 0x43, 0x4f, 0x19, 0xd1, 0x37, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x25, 0x98, + 0xe1, 0xf5, 0xc2, 0xa0, 0xed, 0x6c, 0x73, 0x0d, 0xd2, 0xb0, 0xf2, 0x6a, 0x9b, 0x59, 0x4b, 0xc1, + 0x8e, 0x0f, 0x2b, 0xe7, 0xd3, 0x65, 0x4c, 0xf7, 0xd8, 0x45, 0x85, 0xee, 0x8b, 0x99, 0xb4, 0xc3, + 0x0c, 0xba, 0x01, 0x23, 0x9c, 0xe5, 0x09, 0x16, 0xd4, 0xe3, 0x45, 0x49, 0x73, 0xb3, 0x61, 0x41, + 0xd4, 0x05, 0xd7, 0x14, 0xf5, 0xd1, 0x7b, 0x30, 0xde, 0x0a, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, + 0x58, 0xaf, 0x89, 0x55, 0x93, 0x29, 0x39, 0x55, 0x13, 0x34, 0xdd, 0x75, 0x87, 0x69, 0x4f, 0x13, + 0x10, 0xd6, 0xc9, 0xa1, 0x0d, 0x16, 0xe3, 0x89, 0xa7, 0xb2, 0xef, 0x65, 0x75, 0xa6, 0xb2, 0xdf, + 0x6b, 0x94, 0x27, 0x45, 0x20, 0x28, 0x91, 0x08, 0x3f, 0x21, 0x64, 0x7f, 0xed, 0x1c, 0x18, 0xab, + 0xd5, 0xc8, 0x19, 0x60, 0x9d, 0x52, 0xce, 0x00, 0x0c, 0x63, 0x64, 0xaf, 0x1d, 0x1f, 0x54, 0xdd, + 0xb0, 0x57, 0xd2, 0x99, 0x15, 0x81, 0xd3, 0x4d, 0x53, 0x42, 0xb0, 0xa2, 0x93, 0x9d, 0xd8, 0xa1, + 0xf8, 0x31, 0x26, 0x76, 0x18, 0x3a, 0xc3, 0xc4, 0x0e, 0xeb, 0x30, 0xba, 0xed, 0xc6, 0x98, 0xb4, + 0x03, 0x71, 0xdc, 0x67, 0xae, 0x84, 0xeb, 0x1c, 0xa5, 0x3b, 0xc0, 0xb8, 0x00, 0x60, 0x49, 0x04, + 0xbd, 0xad, 0xf6, 0xc0, 0x48, 0xbe, 0xb4, 0xdc, 0xfd, 0xf8, 0x90, 0xb9, 0x0b, 0x44, 0x22, 0x87, + 0xd1, 0x87, 0x4d, 0xe4, 0xb0, 0x2a, 0xd3, 0x2f, 0x8c, 0xe5, 0x1b, 0x69, 0xb2, 0xec, 0x0a, 0x7d, + 0x92, 0x2e, 0x18, 0x89, 0x2a, 0x4a, 0xa7, 0x97, 0xa8, 0xe2, 0x7b, 0x2d, 0xb8, 0xd0, 0xce, 0xca, + 0xd9, 0x22, 0xd2, 0x27, 0xbc, 0x3e, 0x70, 0x52, 0x1a, 0xa3, 0x41, 0x76, 0x6d, 0xca, 0x44, 0xc3, + 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x37, 0x5b, 0x22, 0xe7, 0xc2, 0x33, 0x39, 0x19, 0x2f, 0x7a, 0xe4, + 0xb9, 0x78, 0x34, 0x79, 0x16, 0x92, 0x5c, 0x17, 0x93, 0x1f, 0x39, 0xd7, 0xc5, 0xdb, 0x2a, 0xd7, + 0x45, 0x8f, 0x48, 0x3a, 0x3c, 0x93, 0x45, 0xdf, 0x0c, 0x17, 0x5a, 0x96, 0x8a, 0xe9, 0xd3, 0xc8, + 0x52, 0xf1, 0x15, 0x93, 0xd9, 0xf3, 0x94, 0x09, 0x2f, 0xf6, 0x61, 0xf6, 0x06, 0xdd, 0xde, 0xec, + 0x9e, 0x67, 0xe4, 0x98, 0x7d, 0xa8, 0x8c, 0x1c, 0x77, 0xf5, 0x5c, 0x17, 0xa8, 0x4f, 0x32, 0x07, + 0x8a, 0x34, 0x60, 0x86, 0x8b, 0xbb, 0xfa, 0x11, 0x74, 0x2e, 0x9f, 0xae, 0x3a, 0x69, 0xba, 0xe9, + 0x66, 0x1d, 0x42, 0xdd, 0x99, 0x33, 0xce, 0x9f, 0x4d, 0xe6, 0x8c, 0x0b, 0xa7, 0x9e, 0x39, 0xe3, + 0xb1, 0x33, 0xc8, 0x9c, 0xf1, 0xf8, 0xc7, 0x9a, 0x39, 0xa3, 0xfc, 0x08, 0x32, 0x67, 0xac, 0x27, + 0x99, 0x33, 0x2e, 0xe6, 0x4f, 0x49, 0x86, 0x55, 0x5a, 0x4e, 0xbe, 0x8c, 0xbb, 0x50, 0x6a, 0x4b, + 0x9f, 0x6a, 0x11, 0xea, 0x27, 0x3b, 0x51, 0x5f, 0x96, 0xe3, 0x35, 0x9f, 0x12, 0x05, 0xc2, 0x09, + 0x29, 0x4a, 0x37, 0xc9, 0x9f, 0xf1, 0x44, 0x0f, 0xc5, 0x58, 0x96, 0xca, 0x21, 0x3f, 0x6b, 0x86, + 0xfd, 0x57, 0x0b, 0x70, 0xa9, 0xf7, 0xba, 0x4e, 0xf4, 0x15, 0xf5, 0x44, 0xbf, 0x9e, 0xd2, 0x57, + 0xf0, 0x4b, 0x40, 0x82, 0x35, 0x70, 0xe0, 0x89, 0xeb, 0x30, 0xab, 0xcc, 0xd1, 0x3c, 0xb7, 0x79, + 0xa0, 0x25, 0xf0, 0x53, 0xae, 0x31, 0x8d, 0x34, 0x02, 0xee, 0xae, 0x83, 
0x16, 0x61, 0xda, 0x28, + 0xac, 0x55, 0x85, 0xb0, 0xaf, 0x14, 0x24, 0x0d, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0xd7, 0x2d, 0x78, + 0x3c, 0x27, 0x64, 0xf5, 0xc0, 0x71, 0x15, 0xb6, 0x60, 0xba, 0x6d, 0x56, 0xed, 0x13, 0x7e, 0xc5, + 0x08, 0x8c, 0xad, 0xfa, 0x9a, 0x02, 0xe0, 0x34, 0xd1, 0xa5, 0x2b, 0xbf, 0xf6, 0x7b, 0x97, 0x3e, + 0xf5, 0x9b, 0xbf, 0x77, 0xe9, 0x53, 0xbf, 0xfd, 0x7b, 0x97, 0x3e, 0xf5, 0x17, 0x8f, 0x2e, 0x59, + 0xbf, 0x76, 0x74, 0xc9, 0xfa, 0xcd, 0xa3, 0x4b, 0xd6, 0x6f, 0x1f, 0x5d, 0xb2, 0x7e, 0xf7, 0xe8, + 0x92, 0xf5, 0x03, 0xbf, 0x7f, 0xe9, 0x53, 0xef, 0x16, 0xf6, 0x5f, 0xf9, 0xff, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x99, 0x05, 0xfa, 0x71, 0xfe, 0xdd, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index d3653ac6..096d5240 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -32,7 +32,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; option go_package = "v1"; // Represents a Persistent Disk resource in AWS. -// +// // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk // can only be mounted as read/write once. AWS EBS volumes support @@ -170,6 +170,23 @@ message Binding { optional ObjectReference target = 2; } +// Represents storage that is managed by an external CSI volume driver +message CSIPersistentVolumeSource { + // Driver is the name of the driver to use for this volume. + // Required. + optional string driver = 1; + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + optional string volumeHandle = 2; + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + optional bool readOnly = 3; +} + // Adds and removes POSIX capabilities from running containers. message Capabilities { // Added capabilities @@ -340,7 +357,7 @@ message ConfigMap { // ConfigMapEnvSource selects a ConfigMap to populate the environment // variables with. -// +// // The contents of the target ConfigMap's Data field will represent the // key-value pairs as environment variables. message ConfigMapEnvSource { @@ -376,7 +393,7 @@ message ConfigMapList { } // Adapts a ConfigMap into a projected volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, // unless the items element is populated with specific mappings of keys to paths. @@ -401,7 +418,7 @@ message ConfigMapProjection { } // Adapts a ConfigMap into a volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless // the items element is populated with specific mappings of keys to paths. @@ -516,6 +533,13 @@ message Container { // +patchStrategy=merge repeated VolumeMount volumeMounts = 9; + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. 
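The hunk above introduces the CSIPersistentVolumeSource message and the new Container.volumeDevices field at the protobuf level; the matching Go types appear further down in types.go. As a rough, non-authoritative sketch of how a client built against this vendored API might wire a raw block device into a container (the container name, image, claim name, and device path are purely illustrative):

```
// Sketch only: relies on the VolumeDevice type and Container.VolumeDevices
// field added by this patch to the vendored k8s.io/api/core/v1 package.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	c := v1.Container{
		Name:  "consumer",
		Image: "busybox",
		// A block-mode PVC named "raw-data" (see the volumeMode hunks below)
		// is surfaced inside the container as a device node, not a mount.
		VolumeDevices: []v1.VolumeDevice{
			{Name: "raw-data", DevicePath: "/dev/xvda"},
		},
	}
	fmt.Printf("%s -> %s\n", c.VolumeDevices[0].Name, c.VolumeDevices[0].DevicePath)
}
```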
@@ -765,6 +789,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -1001,7 +1029,6 @@ message EnvVarSource { } // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -1040,6 +1067,30 @@ message Event { // Type of this event (Normal, Warning), new types could be added in the future // +optional optional string type = 9; + + // Time when this Event was first observed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 11; + + // What action was taken/failed regarding to the Regarding object. + // +optional + optional string action = 12; + + // Optional secondary object for more complex actions. + // +optional + optional ObjectReference related = 13; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingComponent = 14; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 15; } // EventList is a list of events. @@ -1053,6 +1104,19 @@ message EventList { repeated Event items = 2; } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time of the last occurence observed + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // State of this Series: Ongoing or Finished + optional string state = 3; +} + // EventSource contains information for an event. message EventSource { // Component from which the event is generated. @@ -1150,7 +1214,7 @@ message FlockerVolumeSource { } // Represents a Persistent Disk resource in Google Compute Engine. -// +// // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD // can only be mounted as read/write once or read-only many times. GCE @@ -1299,21 +1363,22 @@ message HostPathVolumeSource { optional string type = 2; } -// Represents an ISCSI disk. +// ISCSIPersistentVolumeSource represents an ISCSI disk. // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port +message ISCSIPersistentVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). optional string targetPortal = 1; // Target iSCSI Qualified Name. optional string iqn = 2; - // iSCSI target lun number. 
+ // iSCSI Target Lun number. optional int32 lun = 3; - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional optional string iscsiInterface = 4; @@ -1330,7 +1395,7 @@ message ISCSIVolumeSource { // +optional optional bool readOnly = 6; - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional repeated string portals = 7; @@ -1343,11 +1408,67 @@ message ISCSIVolumeSource { // +optional optional bool chapAuthSession = 11; - // CHAP secret for iSCSI target and initiator authentication + // CHAP Secret for iSCSI target and initiator authentication + // +optional + optional SecretReference secretRef = 10; + + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + optional string initiatorName = 12; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI Target Lun number. + optional int32 lun = 3; + + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP Secret for iSCSI target and initiator authentication // +optional optional LocalObjectReference secretRef = 10; - // Custom iSCSI initiator name. + // Custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1907,12 +2028,12 @@ message ObjectMeta { // The provided value has the same validation rules as the Name field, // and may be truncated by the length of the suffix required to make the value // unique on the server. 
- // + // // If this field is specified and the generated name exists, the server will // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). - // + // // Applied only if Name is not specified. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional @@ -1922,7 +2043,7 @@ message ObjectMeta { // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. - // + // // Must be a DNS_LABEL. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -1938,7 +2059,7 @@ message ObjectMeta { // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. - // + // // Populated by the system. // Read-only. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids @@ -1950,7 +2071,7 @@ message ObjectMeta { // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and passed unmodified back to the server. // They may only be valid for a particular resource or set of resources. - // + // // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . @@ -1966,7 +2087,7 @@ message ObjectMeta { // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // + // // Populated by the system. // Read-only. // Null for lists. @@ -1987,7 +2108,7 @@ message ObjectMeta { // timestamp, until an administrator or automated process can determine the resource is // fully terminated. // If not set, graceful deletion of the object has not been requested. - // + // // Populated by the system when a graceful deletion is requested. // Read-only. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -2029,7 +2150,7 @@ message ObjectMeta { // this object has been completely initialized. Otherwise, the object is considered uninitialized // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to // observe uninitialized objects. - // + // // When an object is created, the system will populate this list with the current set of initializers. // Only privileged users may set or modify this list. Once it is empty, it may not be modified further // by any user. @@ -2198,6 +2319,12 @@ message PersistentVolumeClaimSpec { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional optional string storageClassName = 5; + + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 6; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. 
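The volumeMode field added to PersistentVolumeClaimSpec above (and to PersistentVolumeSpec earlier in this file) is what lets a claim request a raw block volume instead of a formatted filesystem. Below is a minimal sketch of constructing such a claim with the Go types vendored by this patch; the claim name and access mode are assumptions for the example, and a nil VolumeMode keeps the previous Filesystem behaviour:

```
// Sketch only: uses the PersistentVolumeMode type and the VolumeMode field
// added to PersistentVolumeClaimSpec by this patch.
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	block := v1.PersistentVolumeBlock
	pvc := v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "raw-data"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			// VolumeMode is a pointer; leaving it nil implies Filesystem.
			VolumeMode: &block,
		},
	}
	fmt.Println(*pvc.Spec.VolumeMode)
}
```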
@@ -2292,7 +2419,7 @@ message PersistentVolumeSource { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - optional ISCSIVolumeSource iscsi = 7; + optional ISCSIPersistentVolumeSource iscsi = 7; // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md @@ -2352,6 +2479,10 @@ message PersistentVolumeSource { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; + + // CSI represents storage that handled by an external CSI driver + // +optional + optional CSIPersistentVolumeSource csi = 22; } // PersistentVolumeSpec is the specification of a persistent volume. @@ -2393,6 +2524,12 @@ message PersistentVolumeSpec { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional repeated string mountOptions = 7; + + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 8; } // PersistentVolumeStatus is the current status of a persistent volume. @@ -2489,10 +2626,7 @@ message PodAffinityTerm { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional + // Empty topologyKey is not allowed. optional string topologyKey = 3; } @@ -2583,6 +2717,38 @@ message PodCondition { optional string message = 6; } +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +message PodDNSConfig { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + repeated string nameservers = 1; + + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + repeated string searches = 2; + + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + repeated PodDNSConfigOption options = 3; +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +message PodDNSConfigOption { + // Required. + optional string name = 1; + + // +optional + optional string value = 2; +} + // PodExecOptions is the query options to a Pod's remote exec call. 
// --- // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging @@ -2732,11 +2898,11 @@ message PodSecurityContext { // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: - // + // // 1. The owning GID will be the FSGroup // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) // 3. The permission bits are OR'd with rw-rw---- - // + // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; @@ -2807,10 +2973,13 @@ message PodSpec { // +optional optional int64 activeDeadlineSeconds = 5; - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional optional string dnsPolicy = 6; @@ -2919,6 +3088,13 @@ message PodSpec { // The higher the value, the higher the priority. // +optional optional int32 priority = 25; + + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + optional PodDNSConfig dnsConfig = 26; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3609,7 +3785,7 @@ message Secret { // SecretEnvSource selects a Secret to populate the environment // variables with. -// +// // The contents of the target Secret's Data field will represent the // key-value pairs as environment variables. message SecretEnvSource { @@ -3647,7 +3823,7 @@ message SecretList { } // Adapts a secret into a projected volume. -// +// // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. // Note that this is identical to a secret volume source without the default @@ -3683,7 +3859,7 @@ message SecretReference { } // Adapts a Secret into a volume. -// +// // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. @@ -3978,7 +4154,8 @@ message ServiceSpec { // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. 
// +optional optional string externalName = 10; @@ -4183,6 +4360,15 @@ message Volume { optional VolumeSource volumeSource = 2; } +// volumeDevice describes a mapping of a raw block device within a container. +message VolumeDevice { + // name must match the name of a persistentVolumeClaim in the pod + optional string name = 1; + + // devicePath is the path inside of the container that the device will be mapped to. + optional string devicePath = 2; +} + // VolumeMount describes a mounting of a Volume within a container. message VolumeMount { // This must match the Name of a Volume. @@ -4385,3 +4571,4 @@ message WeightedPodAffinityTerm { // Required. A pod affinity term, associated with the corresponding weight. optional PodAffinityTerm podAffinityTerm = 2; } + diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 4916ee3c..62bf076e 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -43,7 +43,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Pod{}, diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 3c369858..6ab69116 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -402,7 +402,7 @@ type PersistentVolumeSource struct { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional @@ -448,6 +448,9 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` + // CSI represents storage that handled by an external CSI driver + // +optional + CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } const ( @@ -524,6 +527,11 @@ type PersistentVolumeSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. @@ -541,6 +549,16 @@ const ( PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. 
+type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. @@ -628,6 +646,11 @@ type PersistentVolumeClaimSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -989,7 +1012,7 @@ type StorageMedium string const ( StorageMediumDefault StorageMedium = "" // use whatever the default is for the node StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugepages StorageMedium = "HugePages" // use hugepages + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages ) // Protocol defines network protocols supported for things like container ports. @@ -1213,14 +1236,15 @@ type NFSVolumeSource struct { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` // Target iSCSI Qualified Name. IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI target lun number. + // iSCSI Target Lun number. Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // Filesystem type of the volume that you want to mount. @@ -1234,7 +1258,7 @@ type ISCSIVolumeSource struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). 
// +optional Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` @@ -1244,10 +1268,56 @@ type ISCSIVolumeSource struct { // whether support iSCSI Session CHAP authentication // +optional SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP secret for iSCSI target and initiator authentication + // CHAP Secret for iSCSI target and initiator authentication // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI initiator name. + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI Target Lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` + // whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` + // whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` + // Custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. 
// +optional @@ -1621,6 +1691,23 @@ type LocalVolumeSource struct { Path string `json:"path" protobuf:"bytes,1,opt,name=path"` } +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + // ContainerPort represents a network port in a single container. type ContainerPort struct { // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each @@ -1689,6 +1776,14 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// volumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"` +} + // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Name of the environment variable. Must be a C_IDENTIFIER. @@ -2032,6 +2127,12 @@ type Container struct { // +patchMergeKey=mountPath // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. @@ -2330,6 +2431,11 @@ const ( // determined by kubelet) DNS settings. DNSDefault DNSPolicy = "Default" + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" + DefaultTerminationGracePeriodSeconds = 30 ) @@ -2486,11 +2592,8 @@ type PodAffinityTerm struct { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional - TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` + // Empty topologyKey is not allowed. 
+ TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"` } // Node affinity is a group of node affinity scheduling rules. @@ -2662,10 +2765,13 @@ type PodSpec struct { // Value must be a positive integer. // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. @@ -2758,6 +2864,12 @@ type PodSpec struct { // The higher the value, the higher the priority. // +optional Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -2825,6 +2937,35 @@ const ( PodQOSBestEffort PodQOSClass = "BestEffort" ) +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. 
type PodStatus struct { @@ -3288,7 +3429,8 @@ type ServiceSpec struct { // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` @@ -3841,8 +3983,6 @@ const ( ) const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" // Default namespace prefix. ResourceDefaultNamespacePrefix = "kubernetes.io/" // Name prefix for huge page resources (alpha). @@ -4034,6 +4174,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` } @@ -4326,7 +4470,6 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -4366,8 +4509,51 @@ type Event struct { // Type of this event (Normal, Warning), new types could be added in the future // +optional Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"` + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"` + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"` } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. 
+type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"` + // Time of the last occurence observed + LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` + // State of this Series: Ongoing or Finished + State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // EventList is a list of events. @@ -4495,6 +4681,13 @@ const ( ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" ) +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" +) + // A ResourceQuotaScope defines a filter that must match each object tracked by a quota type ResourceQuotaScope string @@ -4923,7 +5116,7 @@ const ( // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 + DefaultHardPodAffinitySymmetricWeight int32 = 1 ) // Sysctl defines a kernel parameter to be set diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index d8176d2f..0f141b41 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -116,6 +116,17 @@ func (Binding) SwaggerDoc() map[string]string { return map_Binding } +var map_CSIPersistentVolumeSource = map[string]string{ + "": "Represents storage that is managed by an external CSI volume driver", + "driver": "Driver is the name of the driver to use for this volume. Required.", + "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", +} + +func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CSIPersistentVolumeSource +} + var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -278,6 +289,7 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", @@ -392,7 +404,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -529,16 +541,22 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "involvedObject": "The object that this event is about.", - "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", - "message": "A human-readable description of the status of this operation.", - "source": "The component reporting this event. Should be a short machine understandable string.", - "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", - "count": "The number of times this event has occurred.", - "type": "Type of this event (Normal, Warning), new types could be added in the future", + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", + "eventTime": "Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "action": "What action was taken/failed regarding to the Regarding object.", + "related": "Optional secondary object for more complex actions.", + "reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", } func (Event) SwaggerDoc() map[string]string { @@ -555,6 +573,17 @@ func (EventList) SwaggerDoc() map[string]string { return map_EventList } +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time of the last occurence observed", + "state": "State of this Series: Ongoing or Finished", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + var map_EventSource = map[string]string{ "": "EventSource contains information for an event.", "component": "Component from which the event is generated.", @@ -698,19 +727,38 @@ func (HostPathVolumeSource) SwaggerDoc() map[string]string { return map_HostPathVolumeSource } -var map_ISCSIVolumeSource = map[string]string{ - "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", +var map_ISCSIPersistentVolumeSource = map[string]string{ + "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI target lun number.", - "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "portals": "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", "chapAuthSession": "whether support iSCSI Session CHAP authentication", - "secretRef": "CHAP secret for iSCSI target and initiator authentication", - "initiatorName": "Custom iSCSI initiator name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", +} + +func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIPersistentVolumeSource +} + +var map_ISCSIVolumeSource = map[string]string{ + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", } func (ISCSIVolumeSource) SwaggerDoc() map[string]string { @@ -1151,6 +1199,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
This is an alpha feature and may change in the future.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1212,6 +1261,7 @@ var map_PersistentVolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "csi": "CSI represents storage that handled by an external CSI driver", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1226,6 +1276,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1278,7 +1329,7 @@ var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "labelSelector": "A label query over a set of resources, in this case pods.", "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1322,6 +1373,26 @@ func (PodCondition) SwaggerDoc() map[string]string { return map_PodCondition } +var map_PodDNSConfig = map[string]string{ + "": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + "searches": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + "options": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", +} + +func (PodDNSConfig) SwaggerDoc() map[string]string { + return map_PodDNSConfig +} + +var map_PodDNSConfigOption = map[string]string{ + "": "PodDNSConfigOption defines DNS resolver options of a pod.", + "name": "Required.", +} + +func (PodDNSConfigOption) SwaggerDoc() map[string]string { + return map_PodDNSConfigOption +} + var map_PodExecOptions = map[string]string{ "": "PodExecOptions is the query options to a Pod's remote exec call.", "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", @@ -1410,7 +1481,7 @@ var map_PodSpec = map[string]string{ "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", @@ -1429,6 +1500,7 @@ var map_PodSpec = map[string]string{ "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", "priorityClassName": "If specified, indicates the pod's priority. \"SYSTEM\" is a special keyword which indicates the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1956,7 +2028,7 @@ var map_ServiceSpec = map[string]string{ "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. 
\"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery. This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints when that annotation is deprecated and all clients have been converted to use this field.", @@ -2065,6 +2137,16 @@ func (Volume) SwaggerDoc() map[string]string { return map_Volume } +var map_VolumeDevice = map[string]string{ + "": "volumeDevice describes a mapping of a raw block device within a container.", + "name": "name must match the name of a persistentVolumeClaim in the pod", + "devicePath": "devicePath is the path inside of the container that the device will be mapped to.", +} + +func (VolumeDevice) SwaggerDoc() map[string]string { + return map_VolumeDevice +} + var map_VolumeMount = map[string]string{ "": "VolumeMount describes a mounting of a Volume within a container.", "name": "This must match the Name of a Volume.", diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 8263ba0c..951b2e26 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -23,745 +23,10 @@ package v1 import ( resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: 
reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil 
- }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: 
reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDPersistentVolumeSource).DeepCopyInto(out.(*RBDPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOPersistentVolumeSource).DeepCopyInto(out.(*ScaleIOPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, 
InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { *out = *in @@ -981,6 +246,22 @@ func (in *Binding) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -1425,6 +706,11 @@ func (in *Container) DeepCopyInto(out *Container) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe if *in == nil { @@ -2097,6 +1383,25 @@ func (in *Event) DeepCopyInto(out *Event) { out.Source = in.Source in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } return } @@ -2153,6 +1458,23 @@ func (in *EventList) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventSource) DeepCopyInto(out *EventSource) { *out = *in @@ -2448,6 +1770,45 @@ func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. 
+func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { *out = *in @@ -3590,6 +2951,15 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec **out = **in } } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -3750,7 +3120,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(ISCSIVolumeSource) + *out = new(ISCSIPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3880,6 +3250,15 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { (*in).DeepCopyInto(*out) } } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + **out = **in + } + } return } @@ -3923,6 +3302,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -4131,6 +3519,64 @@ func (in *PodCondition) DeepCopy() *PodCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { *out = *in @@ -4501,6 +3947,15 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { **out = **in } } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } return } @@ -6032,6 +5487,22 @@ func (in *Volume) DeepCopy() *Volume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go similarity index 75% rename from vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go rename to vendor/k8s.io/api/events/v1beta1/doc.go index cd40e74b..8b1a3e31 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package unstructured provides conversion from runtime objects -// to map[string]interface{} representation. -package unstructured // import "k8s.io/apimachinery/pkg/conversion/unstructured" +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=events.k8s.io +package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go new file mode 100644 index 00000000..4861697c --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -0,0 +1,1306 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto + + It has these top-level messages: + Event + EventList + EventSeries +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") +} +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n2, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Series != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n3, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) + n4, err := m.Regarding.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.Related != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n5, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i += copy(dAtA[i:], m.Note) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) + n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) + n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } 
+ i += n7 + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) + n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x78 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + return i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Regarding.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Note) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedFirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedLastTimestamp.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DeprecatedCount)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Note:` + fmt.Sprintf("%v", this.Note) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if 
rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regarding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Regarding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &k8s_io_api_core_v1.ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Note", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Note = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedFirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedFirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedLastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedLastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCount", wireType) + } + m.DeprecatedCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 814 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, 
+ 0x10, 0x16, 0x13, 0x4b, 0xb6, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x3a, 0x40, + 0x60, 0x14, 0x08, 0x59, 0xa7, 0x45, 0xdb, 0x6b, 0x18, 0xbb, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0xe8, 0xad, 0xa5, 0x5d, 0x62, 0x77, 0x29, 0xc0, 0xb7, 0x5e, 0x0a, + 0xf4, 0xd8, 0x67, 0xe8, 0x13, 0xf4, 0x31, 0x7c, 0xcc, 0x31, 0x27, 0xa1, 0x66, 0xdf, 0xa2, 0xa7, + 0x82, 0xcb, 0x95, 0x28, 0x8b, 0x16, 0xec, 0x22, 0x37, 0x72, 0xe6, 0xfb, 0x99, 0x19, 0x0e, 0x07, + 0x45, 0xe7, 0xdf, 0xe9, 0x80, 0xcb, 0xf0, 0x3c, 0x1b, 0x80, 0x12, 0x60, 0x40, 0x87, 0x13, 0x10, + 0x43, 0xa9, 0x42, 0x97, 0x60, 0x29, 0x0f, 0x61, 0x02, 0xc2, 0xe8, 0x70, 0xb2, 0x3f, 0x00, 0xc3, + 0xf6, 0xc3, 0x04, 0x04, 0x28, 0x66, 0x60, 0x18, 0xa4, 0x4a, 0x1a, 0x89, 0x9f, 0x94, 0xd0, 0x80, + 0xa5, 0x3c, 0x28, 0xa1, 0x81, 0x83, 0xee, 0x3c, 0x4f, 0xb8, 0x39, 0xcb, 0x06, 0x41, 0x2c, 0xc7, + 0x61, 0x22, 0x13, 0x19, 0x5a, 0xc6, 0x20, 0x7b, 0x6f, 0xdf, 0xec, 0x8b, 0x7d, 0x2a, 0x95, 0x76, + 0x76, 0x17, 0x4c, 0x63, 0xa9, 0x20, 0x9c, 0xd4, 0xdc, 0x76, 0xbe, 0xae, 0x30, 0x63, 0x16, 0x9f, + 0x71, 0x01, 0xea, 0x22, 0x4c, 0xcf, 0x93, 0x22, 0xa0, 0xc3, 0x31, 0x18, 0x76, 0x13, 0x2b, 0x5c, + 0xc5, 0x52, 0x99, 0x30, 0x7c, 0x0c, 0x35, 0xc2, 0x37, 0xb7, 0x11, 0x74, 0x7c, 0x06, 0x63, 0x56, + 0xe3, 0x7d, 0xb5, 0x8a, 0x97, 0x19, 0x3e, 0x0a, 0xb9, 0x30, 0xda, 0xa8, 0x65, 0xd2, 0xee, 0x9f, + 0x6d, 0xd4, 0x3c, 0x2c, 0x26, 0x87, 0xdf, 0xa1, 0x8d, 0xa2, 0x85, 0x21, 0x33, 0x8c, 0x78, 0x7d, + 0x6f, 0xaf, 0xf3, 0xe2, 0xcb, 0xa0, 0x1a, 0xef, 0x5c, 0x31, 0x48, 0xcf, 0x93, 0x22, 0xa0, 0x83, + 0x02, 0x1d, 0x4c, 0xf6, 0x83, 0xb7, 0x83, 0x5f, 0x20, 0x36, 0xc7, 0x60, 0x58, 0x84, 0x2f, 0xa7, + 0xbd, 0x46, 0x3e, 0xed, 0xa1, 0x2a, 0x46, 0xe7, 0xaa, 0xf8, 0x1d, 0x6a, 0xdb, 0x8f, 0x74, 0xca, + 0xc7, 0x40, 0xee, 0x59, 0x8b, 0xf0, 0x6e, 0x16, 0xc7, 0x3c, 0x56, 0xb2, 0xa0, 0x45, 0x5b, 0xce, + 0xa1, 0x7d, 0x38, 0x53, 0xa2, 0x95, 0x28, 0x7e, 0x83, 0x5a, 0x1a, 0x14, 0x07, 0x4d, 0xee, 0x5b, + 0xf9, 0x67, 0xc1, 0xca, 0x05, 0x09, 0xac, 0xc0, 0x89, 0x45, 0x47, 0x28, 0x9f, 0xf6, 0x5a, 0xe5, + 0x33, 0x75, 0x0a, 0xf8, 0x18, 0x3d, 0x56, 0x90, 0x4a, 0x65, 0xb8, 0x48, 0x5e, 0x49, 0x61, 0x94, + 0x1c, 0x8d, 0x40, 0x91, 0xb5, 0xbe, 0xb7, 0xd7, 0x8e, 0x3e, 0x73, 0x65, 0x3c, 0xa6, 0x75, 0x08, + 0xbd, 0x89, 0x87, 0x7f, 0x40, 0x5b, 0xf3, 0xf0, 0x6b, 0xa1, 0x0d, 0x13, 0x31, 0x90, 0xa6, 0x15, + 0x7b, 0xe2, 0xc4, 0xb6, 0xe8, 0x32, 0x80, 0xd6, 0x39, 0xf8, 0x19, 0x6a, 0xb1, 0xd8, 0x70, 0x29, + 0x48, 0xcb, 0xb2, 0x1f, 0x39, 0x76, 0xeb, 0xa5, 0x8d, 0x52, 0x97, 0x2d, 0x70, 0x0a, 0x98, 0x96, + 0x82, 0xac, 0x5f, 0xc7, 0x51, 0x1b, 0xa5, 0x2e, 0x8b, 0x4f, 0x51, 0x5b, 0x41, 0xc2, 0xd4, 0x90, + 0x8b, 0x84, 0x6c, 0xd8, 0xb1, 0x3d, 0x5d, 0x1c, 0x5b, 0xf1, 0x37, 0x54, 0x9f, 0x99, 0xc2, 0x7b, + 0x50, 0x20, 0xe2, 0x85, 0x2f, 0x41, 0x67, 0x6c, 0x5a, 0x09, 0xe1, 0x37, 0x68, 0x5d, 0xc1, 0xa8, + 0x58, 0x34, 0xd2, 0xbe, 0xbb, 0x66, 0x27, 0x9f, 0xf6, 0xd6, 0x69, 0xc9, 0xa3, 0x33, 0x01, 0xdc, + 0x47, 0x6b, 0x42, 0x1a, 0x20, 0xc8, 0xf6, 0xf1, 0xc0, 0xf9, 0xae, 0xfd, 0x28, 0x0d, 0x50, 0x9b, + 0x29, 0x10, 0xe6, 0x22, 0x05, 0xd2, 0xb9, 0x8e, 0x38, 0xbd, 0x48, 0x81, 0xda, 0x0c, 0x06, 0xd4, + 0x1d, 0x42, 0xaa, 0x20, 0x2e, 0x14, 0x4f, 0x64, 0xa6, 0x62, 0x20, 0x0f, 0x6c, 0x61, 0xbd, 0x9b, + 0x0a, 0x2b, 0x97, 0xc3, 0xc2, 0x22, 0xe2, 0xe4, 0xba, 0x07, 0x4b, 0x02, 0xb4, 0x26, 0x89, 0x7f, + 0xf7, 0x10, 0xa9, 0x82, 0xdf, 0x73, 0xa5, 0xed, 0x62, 0x6a, 0xc3, 0xc6, 0x29, 0x79, 0x68, 0xfd, + 0xbe, 0xb8, 0xdb, 0xca, 0xdb, 0x6d, 0xef, 0x3b, 0x6b, 0x72, 0xb0, 0x42, 0x93, 0xae, 0x74, 0xc3, + 0xbf, 0x79, 0x68, 0xbb, 
0x4a, 0x1e, 0xb1, 0xc5, 0x4a, 0x1e, 0xfd, 0xef, 0x4a, 0x7a, 0xae, 0x92, + 0xed, 0x83, 0x9b, 0x25, 0xe9, 0x2a, 0x2f, 0xfc, 0x12, 0x6d, 0x56, 0xa9, 0x57, 0x32, 0x13, 0x86, + 0x6c, 0xf6, 0xbd, 0xbd, 0x66, 0xb4, 0xed, 0x24, 0x37, 0x0f, 0xae, 0xa7, 0xe9, 0x32, 0x7e, 0xf7, + 0x2f, 0x0f, 0x95, 0xff, 0xfb, 0x11, 0xd7, 0x06, 0xff, 0x5c, 0x3b, 0x54, 0xc1, 0xdd, 0x1a, 0x29, + 0xd8, 0xf6, 0x4c, 0x75, 0x9d, 0xf3, 0xc6, 0x2c, 0xb2, 0x70, 0xa4, 0x0e, 0x51, 0x93, 0x1b, 0x18, + 0x6b, 0x72, 0xaf, 0x7f, 0x7f, 0xaf, 0xf3, 0xa2, 0x7f, 0xdb, 0x05, 0x89, 0x1e, 0x3a, 0xb1, 0xe6, + 0xeb, 0x82, 0x46, 0x4b, 0xf6, 0x6e, 0xee, 0xa1, 0xce, 0xc2, 0x85, 0xc1, 0x4f, 0x51, 0x33, 0xb6, + 0xbd, 0x7b, 0xb6, 0xf7, 0x39, 0xa9, 0xec, 0xb8, 0xcc, 0xe1, 0x0c, 0x75, 0x47, 0x4c, 0x9b, 0xb7, + 0x03, 0x0d, 0x6a, 0x02, 0xc3, 0x4f, 0xb9, 0x93, 0xf3, 0xa5, 0x3d, 0x5a, 0x12, 0xa4, 0x35, 0x0b, + 0xfc, 0x2d, 0x6a, 0x6a, 0xc3, 0x0c, 0xd8, 0xa3, 0xd9, 0x8e, 0x3e, 0x9f, 0xd5, 0x76, 0x52, 0x04, + 0xff, 0x9d, 0xf6, 0xba, 0x0b, 0x8d, 0xd8, 0x18, 0x2d, 0xf1, 0xd1, 0xf3, 0xcb, 0x2b, 0xbf, 0xf1, + 0xe1, 0xca, 0x6f, 0x7c, 0xbc, 0xf2, 0x1b, 0xbf, 0xe6, 0xbe, 0x77, 0x99, 0xfb, 0xde, 0x87, 0xdc, + 0xf7, 0x3e, 0xe6, 0xbe, 0xf7, 0x77, 0xee, 0x7b, 0x7f, 0xfc, 0xe3, 0x37, 0x7e, 0x5a, 0x77, 0xf3, + 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x69, 0xa9, 0x7b, 0x6e, 0xf2, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto new file mode 100644 index 00000000..c13a12a5 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.events.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +message Event { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required. Time when this Event was first observed. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 3; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingController = 4; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 5; + + // What action was taken/failed regarding to the regarding object. 
+ // +optional + optional string action = 6; + + // Why the action was taken. + optional string reason = 7; + + // The object this Event is about. In most cases it's an Object reporting controller implements. + // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + optional k8s.io.api.core.v1.ObjectReference regarding = 8; + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + optional k8s.io.api.core.v1.ObjectReference related = 9; + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + optional string note = 10; + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + optional string type = 11; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional int32 deprecatedCount = 15; +} + +// EventList is a list of Event objects. +message EventList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Event items = 2; +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time when last Event from the series was seen before last heartbeat. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // Information whether this series is ongoing or finished. + optional string state = 3; +} + diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go new file mode 100644 index 00000000..45067829 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "events.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go new file mode 100644 index 00000000..1b68bd74 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +type Event struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required. Time when this Event was first observed. + EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + + // What action was taken/failed regarding to the regarding object. + // +optional + Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + + // Why the action was taken. + Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` + + // The object this Event is about. 
In most cases it's an Object reporting controller implements. + // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"` + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"` + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"` + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"` +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count" protobuf:"varint,1,opt,name=count"` + // Time when last Event from the series was seen before last heartbeat. + LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` + // Information whether this series is ongoing or finished. + State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of Event objects. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. 
+ Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..04a4a912 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "eventTime": "Required. Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "reportingController": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "action": "What action was taken/failed regarding to the regarding object.", + "reason": "Why the action was taken.", + "regarding": "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + "related": "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + "note": "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "Type of this event (Normal, Warning), new types could be added in the future.", + "deprecatedSource": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedLastTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedCount": "Deprecated field assuring backward compatibility with core.v1 Event type", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of Event objects.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", + "state": "Information whether this series is ongoing or finished.", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..2886eba1 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,127 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + out.Regarding = in.Regarding + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(v1.ObjectReference) + **out = **in + } + } + out.DeprecatedSource = in.DeprecatedSource + in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp) + in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index ac174dd2..8ce18304 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index d508216a..ba5998e2 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -25,13 +25,14 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: - APIVersion + AllowedFlexVolume AllowedHostPath CustomMetricCurrentStatus CustomMetricCurrentStatusList CustomMetricTarget CustomMetricTargetList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -82,10 +83,6 @@ limitations under the License. ScaleSpec ScaleStatus SupplementalGroupsStrategyOptions - ThirdPartyResource - ThirdPartyResourceData - ThirdPartyResourceDataList - ThirdPartyResourceList */ package v1beta1 @@ -117,9 +114,9 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIVersion) Reset() { *m = APIVersion{} } -func (*APIVersion) ProtoMessage() {} -func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} @@ -149,246 +146,233 @@ func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{11} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{15} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{27} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{33} + return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{47} + return fileDescriptorGenerated, []int{48} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} + return fileDescriptorGenerated, []int{51} } func (m 
*RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{52} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} + return fileDescriptorGenerated, []int{57} } -func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } -func (*ThirdPartyResource) ProtoMessage() {} -func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - -func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } -func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } - -func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } -func (*ThirdPartyResourceDataList) ProtoMessage() {} -func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{59} -} - -func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } -func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } - func init() { - proto.RegisterType((*APIVersion)(nil), "k8s.io.api.extensions.v1beta1.APIVersion") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), 
"k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") @@ -439,12 +423,8 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") - proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResource") - proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceData") - proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceDataList") - proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceList") } -func (m *APIVersion) Marshal() (dAtA []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -454,15 +434,15 @@ func (m *APIVersion) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *APIVersion) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) return i, nil } @@ -650,6 +630,48 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -668,11 +690,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n7, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -707,28 +729,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + n8, err := 
m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n9, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n9, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -787,6 +809,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -813,11 +847,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n10, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } return i, nil } @@ -840,27 +874,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n12 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n13, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 return i, nil } @@ -898,19 +932,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n14, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n15, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n15 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 return i, nil } @@ -932,11 +966,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n16, err := m.ListMeta.MarshalTo(dAtA[i:]) + n17, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 if len(m.Items) > 0 { for _, msg := range 
m.Items { dAtA[i] = 0x12 @@ -996,11 +1030,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n17, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 return i, nil } @@ -1028,28 +1062,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1070,11 +1104,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n21, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1160,11 +1194,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } return i, nil } @@ -1225,11 +1259,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n23, err := m.Backend.MarshalTo(dAtA[i:]) + n24, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 return i, nil } @@ -1366,27 +1400,27 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n24, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n25, err := m.Spec.MarshalTo(dAtA[i:]) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n25 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n26, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n26, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n26 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n27, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 return i, nil } @@ -1412,11 +1446,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n27, err := m.ServicePort.MarshalTo(dAtA[i:]) + n28, 
err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 return i, nil } @@ -1438,11 +1472,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n28, err := m.ListMeta.MarshalTo(dAtA[i:]) + n29, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1480,11 +1514,11 @@ func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n29, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 return i, nil } @@ -1507,11 +1541,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n30, err := m.HTTP.MarshalTo(dAtA[i:]) + n31, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } return i, nil } @@ -1535,11 +1569,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n31, err := m.Backend.MarshalTo(dAtA[i:]) + n32, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.TLS) > 0 { for _, msg := range m.TLS { @@ -1586,11 +1620,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n32, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 return i, nil } @@ -1649,19 +1683,19 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) + n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n35, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 return i, nil } @@ -1767,11 +1801,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n35, err := m.ListMeta.MarshalTo(dAtA[i:]) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1806,32 +1840,32 @@ func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n36, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n37, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + n37, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 } - if m.IPBlock != nil { - dAtA[i] = 0x1a + if m.NamespaceSelector != nil { + dAtA[i] = 0x12 i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n38, err := m.IPBlock.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n38 } + if m.IPBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) + n39, err := m.IPBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } return i, nil } @@ -1860,11 +1894,11 @@ func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n39, err := m.Port.MarshalTo(dAtA[i:]) + n40, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 } return i, nil } @@ -1887,11 +1921,11 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n40, err := m.PodSelector.MarshalTo(dAtA[i:]) + n41, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if len(m.Ingress) > 0 { for _, msg := range m.Ingress { dAtA[i] = 0x12 @@ -1952,19 +1986,19 @@ func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n41, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n42, err := m.Spec.MarshalTo(dAtA[i:]) + n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n42 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n43, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 return i, nil } @@ -1986,11 +2020,11 @@ func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n43, err := m.ListMeta.MarshalTo(dAtA[i:]) + n44, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n44 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2128,35 +2162,35 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n44, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n45, err := m.RunAsUser.MarshalTo(dAtA[i:]) + n45, err := m.SELinux.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n45 - dAtA[i] = 0x62 + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n46, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) + n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n46 - dAtA[i] = 0x6a + dAtA[i] = 0x62 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n47, err := m.FSGroup.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) + n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n47 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) + n48, err := m.FSGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i 
+= n48 dAtA[i] = 0x70 i++ if m.ReadOnlyRootFilesystem { @@ -2201,6 +2235,20 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -2222,27 +2270,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n49, err := m.Spec.MarshalTo(dAtA[i:]) + n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n49 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n50, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n50, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n50 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n51, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 return i, nil } @@ -2272,11 +2320,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n52 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -2306,11 +2354,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n52, err := m.ListMeta.MarshalTo(dAtA[i:]) + n53, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n53 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2350,20 +2398,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n53, err := m.Selector.MarshalTo(dAtA[i:]) + n54, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n54 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n54, err := m.Template.MarshalTo(dAtA[i:]) + n55, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n55 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -2473,11 +2521,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n56 } return i, nil } @@ -2501,21 +2549,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n57 } if m.MaxSurge != nil { dAtA[i] = 
0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n58 } return i, nil } @@ -2577,11 +2625,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n59 } return i, nil } @@ -2604,27 +2652,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(dAtA[i:]) + n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n60 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n61, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n61 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n62, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 return i, nil } @@ -2730,156 +2778,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func (m *ThirdPartyResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceData) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n63, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *ThirdPartyResourceDataList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceDataList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 
0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n64, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n65, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) @@ -2907,10 +2805,10 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *APIVersion) Size() (n int) { +func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l - l = len(m.Name) + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2979,6 +2877,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -3026,6 +2940,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3531,6 +3451,12 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3722,62 +3648,6 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { return n } -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = 
e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -3791,12 +3661,12 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *APIVersion) String() string { +func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&APIVersion{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `}`, }, "") return s @@ -3865,6 +3735,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -3905,6 +3789,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4303,6 +4188,7 @@ func (this *PodSecurityPolicySpec) String() string { `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4489,51 +4375,6 @@ func (this *SupplementalGroupsStrategyOptions) String() string { }, "") return s } -func (this *ThirdPartyResource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResource{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceData{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceDataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceDataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -4542,7 +4383,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *APIVersion) Unmarshal(dAtA []byte) error { +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4565,15 +4406,15 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4598,7 +4439,7 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5220,6 +5061,202 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5733,6 +5770,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10207,6 +10275,37 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12076,479 +12175,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ThirdPartyResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, APIVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceDataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResourceData{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResource{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -12659,232 +12285,229 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 3632 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5b, 0xcd, 0x6f, 0x24, 0xc7, - 0x75, 0xdf, 0x9e, 0x0f, 0xce, 0xf0, 0x71, 0xf9, 0x55, 0x5c, 0x91, 0x63, 0x4a, 0xcb, 0x59, 0xb7, - 0x80, 0xcd, 0x4a, 0x91, 0x66, 0xb4, 0x94, 0x56, 0x56, 0x24, 0xc4, 0x0e, 0x87, 0xdc, 0x0f, 0x3a, - 0xfc, 0x18, 0xd5, 0x0c, 0x69, 0x67, 0x91, 0x75, 0xd4, 0x9c, 0x29, 0x0e, 0x7b, 0xd9, 0xd3, 0xdd, - 0xee, 0xae, 0xa6, 0x39, 0x97, 0x20, 0x87, 0xc0, 0x40, 0x80, 0x04, 0x49, 0x0e, 0x0e, 0x94, 0x5b, - 0x7c, 0xc9, 0x29, 0x41, 0x7c, 0x4b, 0x0e, 0x86, 0x81, 0x00, 0x0e, 0xb0, 0x08, 0x9c, 0xc0, 0xa7, - 0xc4, 0x27, 0x22, 0xa2, 0x8e, 0xf9, 0x07, 0x82, 0x3d, 0x04, 0x41, 0x55, 0x57, 0x7f, 0x77, 0x73, - 0x66, 0xa8, 0x25, 0x11, 0xf8, 0x36, 0x5d, 0xef, 0xbd, 0xdf, 0x7b, 0xf5, 0xaa, 0xea, 0xbd, 0x57, - 0x1f, 0x03, 0x8f, 0x8e, 0x3f, 0xb2, 0x6b, 0xaa, 0x51, 0x3f, 0x76, 0x0e, 0x88, 0xa5, 0x13, 0x4a, - 0xec, 0xfa, 0x09, 0xd1, 0xbb, 0x86, 0x55, 0x17, 0x04, 0xc5, 0x54, 0xeb, 0xe4, 0x94, 0x12, 0xdd, - 0x56, 0x0d, 0xdd, 0xae, 0x9f, 0xdc, 0x3f, 0x20, 0x54, 0xb9, 0x5f, 0xef, 0x11, 0x9d, 0x58, 0x0a, - 0x25, 0xdd, 0x9a, 0x69, 0x19, 0xd4, 0x40, 0xb7, 0x5d, 0xf6, 0x9a, 0x62, 0xaa, 0xb5, 0x80, 0xbd, - 0x26, 0xd8, 0x97, 0xdf, 0xed, 0xa9, 0xf4, 0xc8, 0x39, 0xa8, 0x75, 0x8c, 0x7e, 0xbd, 0x67, 0xf4, - 0x8c, 0x3a, 0x97, 0x3a, 0x70, 0x0e, 0xf9, 0x17, 0xff, 0xe0, 0xbf, 0x5c, 0xb4, 0x65, 0x39, 0xa4, - 0xbc, 0x63, 0x58, 0xa4, 0x7e, 0x92, 0xd0, 0xb8, 0xfc, 0x56, 0x88, 0xc7, 0x34, 0x34, 0xb5, 0x33, - 0xc8, 0x32, 0x6e, 0xf9, 0x83, 0x80, 0xb5, 0xaf, 0x74, 0x8e, 0x54, 0x9d, 0x58, 0x83, 0xba, 0x79, - 0xdc, 0xe3, 0xb2, 0x16, 0xb1, 0x0d, 0xc7, 0xea, 0x90, 0xb1, 0xa4, 0xec, 0x7a, 0x9f, 0x50, 0x25, - 0xcd, 0xac, 0x7a, 0x96, 0x94, 0xe5, 0xe8, 0x54, 0xed, 0x27, 0xd5, 0x7c, 0x38, 0x4c, 0xc0, 0xee, - 0x1c, 0x91, 0xbe, 0x92, 0x90, 0x7b, 
0x3f, 0x4b, 0xce, 0xa1, 0xaa, 0x56, 0x57, 0x75, 0x6a, 0x53, - 0x2b, 0x2e, 0x24, 0xd7, 0x00, 0xd6, 0x9a, 0x9b, 0xfb, 0xc4, 0x62, 0xc3, 0x83, 0xee, 0x40, 0x41, - 0x57, 0xfa, 0xa4, 0x22, 0xdd, 0x91, 0xee, 0x4d, 0x36, 0x6e, 0xbe, 0x38, 0xab, 0xde, 0x38, 0x3f, - 0xab, 0x16, 0x76, 0x94, 0x3e, 0xc1, 0x9c, 0x22, 0x3f, 0x84, 0xd9, 0x35, 0x4d, 0x33, 0x7e, 0x40, - 0xba, 0x4f, 0x0c, 0x9b, 0x36, 0x15, 0x7a, 0x84, 0x56, 0x01, 0x4c, 0x85, 0x1e, 0x35, 0x2d, 0x72, - 0xa8, 0x9e, 0x0a, 0x51, 0x24, 0x44, 0xa1, 0xe9, 0x53, 0x70, 0x88, 0x4b, 0xfe, 0x6b, 0x09, 0xbe, - 0xb6, 0xee, 0xd8, 0xd4, 0xe8, 0x6f, 0x13, 0x6a, 0xa9, 0x9d, 0x75, 0xc7, 0xb2, 0x88, 0x4e, 0x5b, - 0x54, 0xa1, 0x8e, 0x3d, 0xdc, 0x0c, 0xf4, 0x14, 0x8a, 0x27, 0x8a, 0xe6, 0x90, 0x4a, 0xee, 0x8e, - 0x74, 0x6f, 0x6a, 0xb5, 0x56, 0x0b, 0x66, 0x9b, 0xdf, 0xf7, 0x9a, 0x79, 0xdc, 0xe3, 0xd3, 0xcf, - 0x1b, 0xd0, 0xda, 0xa7, 0x8e, 0xa2, 0x53, 0x95, 0x0e, 0x1a, 0xb7, 0x04, 0xe4, 0x4d, 0xa1, 0x77, - 0x9f, 0x61, 0x61, 0x17, 0x52, 0xfe, 0x43, 0xb8, 0x9d, 0x69, 0xda, 0x96, 0x6a, 0x53, 0xf4, 0x0c, - 0x8a, 0x2a, 0x25, 0x7d, 0xbb, 0x22, 0xdd, 0xc9, 0xdf, 0x9b, 0x5a, 0xfd, 0xa8, 0x76, 0xe1, 0x54, - 0xaf, 0x65, 0x82, 0x35, 0xa6, 0x85, 0x19, 0xc5, 0x4d, 0x06, 0x87, 0x5d, 0x54, 0xf9, 0x2f, 0x25, - 0x40, 0x61, 0x99, 0xb6, 0x62, 0xf5, 0x08, 0x1d, 0xc1, 0x29, 0xbf, 0xf7, 0xd5, 0x9c, 0xb2, 0x20, - 0x20, 0xa7, 0x5c, 0x85, 0x11, 0x9f, 0x98, 0xb0, 0x98, 0x34, 0x89, 0x3b, 0x63, 0x3f, 0xea, 0x8c, - 0xfb, 0x63, 0x38, 0xc3, 0x45, 0xc9, 0xf0, 0xc2, 0x8f, 0x72, 0x30, 0xb9, 0xa1, 0x90, 0xbe, 0xa1, - 0xb7, 0x08, 0x45, 0x9f, 0x41, 0x99, 0xad, 0xaf, 0xae, 0x42, 0x15, 0xee, 0x80, 0xa9, 0xd5, 0xf7, - 0x2e, 0xea, 0x9d, 0x5d, 0x63, 0xdc, 0xb5, 0x93, 0xfb, 0xb5, 0xdd, 0x83, 0xe7, 0xa4, 0x43, 0xb7, - 0x09, 0x55, 0x82, 0x39, 0x19, 0xb4, 0x61, 0x1f, 0x15, 0xed, 0x40, 0xc1, 0x36, 0x49, 0x47, 0xf8, - 0xee, 0x9d, 0x21, 0xdd, 0xf0, 0x2d, 0x6b, 0x99, 0xa4, 0x13, 0x0c, 0x06, 0xfb, 0xc2, 0x1c, 0x07, - 0xed, 0xc3, 0x84, 0xcd, 0x47, 0xb9, 0x92, 0x4f, 0x8c, 0xc6, 0xc5, 0x88, 0xee, 0xdc, 0x98, 0x11, - 0x98, 0x13, 0xee, 0x37, 0x16, 0x68, 0xf2, 0x4f, 0x25, 0x98, 0xf6, 0x79, 0xf9, 0x08, 0xfc, 0x7e, - 0xc2, 0x37, 0xb5, 0xd1, 0x7c, 0xc3, 0xa4, 0xb9, 0x67, 0xe6, 0x84, 0xae, 0xb2, 0xd7, 0x12, 0xf2, - 0xcb, 0xb6, 0x37, 0xbe, 0x39, 0x3e, 0xbe, 0xf7, 0x46, 0xed, 0x46, 0xc6, 0xb0, 0xfe, 0x55, 0x21, - 0x64, 0x3e, 0x73, 0x17, 0x7a, 0x06, 0x65, 0x9b, 0x68, 0xa4, 0x43, 0x0d, 0x4b, 0x98, 0xff, 0xfe, - 0x88, 0xe6, 0x2b, 0x07, 0x44, 0x6b, 0x09, 0xd1, 0xc6, 0x4d, 0x66, 0xbf, 0xf7, 0x85, 0x7d, 0x48, - 0xf4, 0x29, 0x94, 0x29, 0xe9, 0x9b, 0x9a, 0x42, 0xbd, 0x75, 0xf1, 0x66, 0xb8, 0x0b, 0x2c, 0x99, - 0x30, 0xb0, 0xa6, 0xd1, 0x6d, 0x0b, 0x36, 0x3e, 0xa4, 0xbe, 0x4b, 0xbc, 0x56, 0xec, 0xc3, 0xa0, - 0x13, 0x98, 0x71, 0xcc, 0x2e, 0xe3, 0xa4, 0x2c, 0x94, 0xf6, 0x06, 0x62, 0x88, 0x3f, 0x1c, 0xd5, - 0x37, 0x7b, 0x11, 0xe9, 0xc6, 0xa2, 0xd0, 0x35, 0x13, 0x6d, 0xc7, 0x31, 0x2d, 0x68, 0x0d, 0x66, - 0xfb, 0xaa, 0x8e, 0x89, 0xd2, 0x1d, 0xb4, 0x48, 0xc7, 0xd0, 0xbb, 0x76, 0xa5, 0x70, 0x47, 0xba, - 0x57, 0x6c, 0x2c, 0x09, 0x80, 0xd9, 0xed, 0x28, 0x19, 0xc7, 0xf9, 0xd1, 0xb7, 0x01, 0x79, 0xdd, - 0x78, 0xec, 0x66, 0x02, 0xd5, 0xd0, 0x2b, 0xc5, 0x3b, 0xd2, 0xbd, 0x7c, 0x63, 0x59, 0xa0, 0xa0, - 0x76, 0x82, 0x03, 0xa7, 0x48, 0xa1, 0x2d, 0xb8, 0x65, 0x91, 0x13, 0x95, 0xf5, 0xf1, 0x89, 0x6a, - 0x53, 0xc3, 0x1a, 0x6c, 0xa9, 0x7d, 0x95, 0x56, 0x26, 0xb8, 0x4d, 0x95, 0xf3, 0xb3, 0xea, 0x2d, - 0x9c, 0x42, 0xc7, 0xa9, 0x52, 0xf2, 0x4f, 0x8a, 0x30, 0x1b, 0x5b, 0x03, 0x68, 0x1f, 0x16, 0x3b, - 0x6e, 0xc0, 0xdc, 0x71, 0xfa, 0x07, 0xc4, 0x6a, 0x75, 0x8e, 
0x48, 0xd7, 0xd1, 0x48, 0x97, 0x4f, - 0x94, 0x62, 0x63, 0x45, 0x58, 0xbc, 0xb8, 0x9e, 0xca, 0x85, 0x33, 0xa4, 0x99, 0x17, 0x74, 0xde, - 0xb4, 0xad, 0xda, 0xb6, 0x8f, 0x99, 0xe3, 0x98, 0xbe, 0x17, 0x76, 0x12, 0x1c, 0x38, 0x45, 0x8a, - 0xd9, 0xd8, 0x25, 0xb6, 0x6a, 0x91, 0x6e, 0xdc, 0xc6, 0x7c, 0xd4, 0xc6, 0x8d, 0x54, 0x2e, 0x9c, - 0x21, 0x8d, 0x1e, 0xc0, 0x94, 0xab, 0x8d, 0x8f, 0x9f, 0x18, 0x68, 0x3f, 0x44, 0xef, 0x04, 0x24, - 0x1c, 0xe6, 0x63, 0x5d, 0x33, 0x0e, 0x6c, 0x62, 0x9d, 0x90, 0x6e, 0xf6, 0x00, 0xef, 0x26, 0x38, - 0x70, 0x8a, 0x14, 0xeb, 0x9a, 0x3b, 0x03, 0x13, 0x5d, 0x9b, 0x88, 0x76, 0x6d, 0x2f, 0x95, 0x0b, - 0x67, 0x48, 0xb3, 0x79, 0xec, 0x9a, 0xbc, 0x76, 0xa2, 0xa8, 0x9a, 0x72, 0xa0, 0x91, 0x4a, 0x29, - 0x3a, 0x8f, 0x77, 0xa2, 0x64, 0x1c, 0xe7, 0x47, 0x8f, 0x61, 0xde, 0x6d, 0xda, 0xd3, 0x15, 0x1f, - 0xa4, 0xcc, 0x41, 0xbe, 0x26, 0x40, 0xe6, 0x77, 0xe2, 0x0c, 0x38, 0x29, 0x83, 0x3e, 0x86, 0x99, - 0x8e, 0xa1, 0x69, 0x7c, 0x3e, 0xae, 0x1b, 0x8e, 0x4e, 0x2b, 0x93, 0x1c, 0x05, 0xb1, 0xf5, 0xb8, - 0x1e, 0xa1, 0xe0, 0x18, 0xa7, 0xfc, 0xaf, 0x12, 0x2c, 0x65, 0xac, 0x69, 0xf4, 0x2d, 0x28, 0xd0, - 0x81, 0xe9, 0x65, 0xeb, 0xdf, 0xf4, 0x12, 0x44, 0x7b, 0x60, 0x92, 0x97, 0x67, 0xd5, 0xd7, 0x33, - 0xc4, 0x18, 0x19, 0x73, 0x41, 0xa4, 0xc3, 0xb4, 0xc5, 0xd4, 0xe9, 0x3d, 0x97, 0x45, 0x04, 0xaf, - 0x07, 0x43, 0x62, 0x0c, 0x0e, 0xcb, 0x04, 0xc1, 0x78, 0xfe, 0xfc, 0xac, 0x3a, 0x1d, 0xa1, 0xe1, - 0x28, 0xbc, 0xfc, 0x79, 0x0e, 0x60, 0x83, 0x98, 0x9a, 0x31, 0xe8, 0x13, 0xfd, 0x3a, 0x12, 0xee, - 0x6e, 0x24, 0xe1, 0xbe, 0x3b, 0x2c, 0x76, 0xfa, 0xa6, 0x65, 0x66, 0xdc, 0xef, 0xc4, 0x32, 0x6e, - 0x7d, 0x74, 0xc8, 0x8b, 0x53, 0xee, 0x7f, 0xe6, 0x61, 0x21, 0x60, 0x5e, 0x37, 0xf4, 0xae, 0xca, - 0xd7, 0xc7, 0x27, 0x91, 0x31, 0xfe, 0x8d, 0xd8, 0x18, 0x2f, 0xa5, 0x88, 0x84, 0xc6, 0x77, 0xcb, - 0xb7, 0x36, 0xc7, 0xc5, 0x3f, 0x88, 0x2a, 0x7f, 0x79, 0x56, 0x4d, 0xd9, 0xf3, 0xd4, 0x7c, 0xa4, - 0xa8, 0x89, 0xe8, 0x2e, 0x4c, 0x58, 0x44, 0xb1, 0x0d, 0x9d, 0x07, 0x8a, 0xc9, 0xa0, 0x2b, 0x98, - 0xb7, 0x62, 0x41, 0x45, 0x6f, 0x41, 0xa9, 0x4f, 0x6c, 0x5b, 0xe9, 0x11, 0x1e, 0x13, 0x26, 0x1b, - 0xb3, 0x82, 0xb1, 0xb4, 0xed, 0x36, 0x63, 0x8f, 0x8e, 0x9e, 0xc3, 0x8c, 0xa6, 0xd8, 0x62, 0x82, - 0xb6, 0xd5, 0x3e, 0xe1, 0xab, 0x7e, 0x6a, 0xf5, 0xed, 0xd1, 0xe6, 0x01, 0x93, 0x08, 0x32, 0xdb, - 0x56, 0x04, 0x09, 0xc7, 0x90, 0xd1, 0x09, 0x20, 0xd6, 0xd2, 0xb6, 0x14, 0xdd, 0x76, 0x1d, 0xc5, - 0xf4, 0x95, 0xc6, 0xd6, 0xe7, 0x47, 0xb8, 0xad, 0x04, 0x1a, 0x4e, 0xd1, 0x20, 0xff, 0x4c, 0x82, - 0x99, 0x60, 0x98, 0xae, 0xa1, 0x9a, 0xda, 0x89, 0x56, 0x53, 0x6f, 0x8d, 0x3c, 0x45, 0x33, 0xca, - 0xa9, 0xff, 0xc9, 0x01, 0x0a, 0x98, 0xd8, 0x02, 0x3f, 0x50, 0x3a, 0xc7, 0x23, 0xec, 0x15, 0x7e, - 0x24, 0x01, 0x12, 0xe1, 0x79, 0x4d, 0xd7, 0x0d, 0xca, 0x23, 0xbe, 0x67, 0xd6, 0xe6, 0xc8, 0x66, - 0x79, 0x1a, 0x6b, 0x7b, 0x09, 0xac, 0x87, 0x3a, 0xb5, 0x06, 0xc1, 0x88, 0x24, 0x19, 0x70, 0x8a, - 0x01, 0x48, 0x01, 0xb0, 0x04, 0x66, 0xdb, 0x10, 0x0b, 0xf9, 0xdd, 0x11, 0x62, 0x1e, 0x13, 0x58, - 0x37, 0xf4, 0x43, 0xb5, 0x17, 0x84, 0x1d, 0xec, 0x03, 0xe1, 0x10, 0xe8, 0xf2, 0x43, 0x58, 0xca, - 0xb0, 0x16, 0xcd, 0x41, 0xfe, 0x98, 0x0c, 0x5c, 0xb7, 0x61, 0xf6, 0x13, 0xdd, 0x0a, 0xef, 0xa9, - 0x26, 0xc5, 0x76, 0xe8, 0xe3, 0xdc, 0x47, 0x92, 0xfc, 0xd3, 0x62, 0x78, 0xee, 0xf0, 0x52, 0xf6, - 0x1e, 0x94, 0x2d, 0x62, 0x6a, 0x6a, 0x47, 0xb1, 0x45, 0x85, 0xc2, 0xab, 0x52, 0x2c, 0xda, 0xb0, - 0x4f, 0x8d, 0x14, 0xbd, 0xb9, 0xab, 0x2d, 0x7a, 0xf3, 0xaf, 0xa6, 0xe8, 0xfd, 0x03, 0x28, 0xdb, - 0x5e, 0xb9, 0x5b, 0xe0, 0x90, 0xf7, 0xc7, 0x88, 0xaf, 0xa2, 0xd2, 0xf5, 0x15, 0xf8, 
0x35, 0xae, - 0x0f, 0x9a, 0x56, 0xdd, 0x16, 0xc7, 0xac, 0x6e, 0x5f, 0x69, 0x45, 0xca, 0x62, 0xaa, 0xa9, 0x38, - 0x36, 0xe9, 0xf2, 0x40, 0x54, 0x0e, 0x62, 0x6a, 0x93, 0xb7, 0x62, 0x41, 0x45, 0xcf, 0x22, 0x53, - 0xb6, 0x7c, 0x99, 0x29, 0x3b, 0x93, 0x3d, 0x5d, 0xd1, 0x1e, 0x2c, 0x99, 0x96, 0xd1, 0xb3, 0x88, - 0x6d, 0x6f, 0x10, 0xa5, 0xab, 0xa9, 0x3a, 0xf1, 0xfc, 0xe3, 0x96, 0x2a, 0xaf, 0x9f, 0x9f, 0x55, - 0x97, 0x9a, 0xe9, 0x2c, 0x38, 0x4b, 0x56, 0x7e, 0x51, 0x80, 0xb9, 0x78, 0x06, 0xcc, 0xa8, 0x1e, - 0xa5, 0x4b, 0x55, 0x8f, 0xef, 0x84, 0x16, 0x83, 0x5b, 0x5a, 0xfb, 0xa3, 0x9f, 0xb2, 0x20, 0xd6, - 0x60, 0x56, 0x44, 0x03, 0x8f, 0x28, 0xea, 0x67, 0x7f, 0xf4, 0xf7, 0xa2, 0x64, 0x1c, 0xe7, 0x67, - 0x35, 0x61, 0x50, 0xea, 0x79, 0x20, 0x85, 0x68, 0x4d, 0xb8, 0x16, 0x67, 0xc0, 0x49, 0x19, 0xb4, - 0x0d, 0x0b, 0x8e, 0x9e, 0x84, 0x72, 0x67, 0xe3, 0xeb, 0x02, 0x6a, 0x61, 0x2f, 0xc9, 0x82, 0xd3, - 0xe4, 0xd0, 0x21, 0x40, 0xc7, 0x4b, 0xdb, 0x76, 0x65, 0x82, 0x47, 0xd8, 0xd5, 0x91, 0xd7, 0x8e, - 0x9f, 0xf1, 0x83, 0xb8, 0xe6, 0x37, 0xd9, 0x38, 0x84, 0x8c, 0x3e, 0x81, 0x69, 0x8b, 0x6f, 0x08, - 0x3c, 0x83, 0xdd, 0xa2, 0xfa, 0x35, 0x21, 0x36, 0x8d, 0xc3, 0x44, 0x1c, 0xe5, 0x4d, 0xa9, 0x83, - 0xcb, 0x23, 0xd7, 0xc1, 0xff, 0x2c, 0x85, 0x93, 0x90, 0x5f, 0x02, 0x7f, 0x1c, 0x29, 0x8f, 0xee, - 0xc6, 0xca, 0xa3, 0xc5, 0xa4, 0x44, 0xa8, 0x3a, 0x32, 0xd2, 0xab, 0xdf, 0x0f, 0xc7, 0xaa, 0x7e, - 0x83, 0xe4, 0x39, 0xbc, 0xfc, 0xfd, 0xb1, 0x04, 0x8b, 0x8f, 0x5a, 0x8f, 0x2d, 0xc3, 0x31, 0x3d, - 0x73, 0x76, 0x4d, 0xd7, 0xaf, 0xdf, 0x80, 0x82, 0xe5, 0x68, 0x5e, 0x3f, 0xde, 0xf4, 0xfa, 0x81, - 0x1d, 0x8d, 0xf5, 0x63, 0x21, 0x26, 0xe5, 0x76, 0x82, 0x09, 0xa0, 0x1d, 0x98, 0xb0, 0x14, 0xbd, - 0x47, 0xbc, 0xb4, 0x7a, 0x77, 0x88, 0xf5, 0x9b, 0x1b, 0x98, 0xb1, 0x87, 0x8a, 0x37, 0x2e, 0x8d, - 0x05, 0x8a, 0xfc, 0x67, 0x12, 0xcc, 0x3e, 0x69, 0xb7, 0x9b, 0x9b, 0x3a, 0x5f, 0xd1, 0xfc, 0xf0, - 0xf5, 0x0e, 0x14, 0x4c, 0x85, 0x1e, 0xc5, 0x33, 0x3d, 0xa3, 0x61, 0x4e, 0x41, 0xdf, 0x85, 0x12, - 0x8b, 0x24, 0x44, 0xef, 0x8e, 0x58, 0x6a, 0x0b, 0xf8, 0x86, 0x2b, 0x14, 0x54, 0x88, 0xa2, 0x01, - 0x7b, 0x70, 0xf2, 0x31, 0xdc, 0x0a, 0x99, 0xc3, 0xfc, 0xc1, 0xcf, 0x0c, 0x51, 0x0b, 0x8a, 0x4c, - 0xb3, 0x77, 0x24, 0x38, 0xec, 0xe4, 0x2b, 0xd6, 0xa5, 0xa0, 0xd2, 0x61, 0x5f, 0x36, 0x76, 0xb1, - 0xe4, 0x6d, 0x98, 0xe6, 0x27, 0xce, 0x86, 0x45, 0xb9, 0x5b, 0xd0, 0x6d, 0xc8, 0xf7, 0x55, 0x5d, - 0xe4, 0xd9, 0x29, 0x21, 0x93, 0x67, 0x39, 0x82, 0xb5, 0x73, 0xb2, 0x72, 0x2a, 0x22, 0x4f, 0x40, - 0x56, 0x4e, 0x31, 0x6b, 0x97, 0x1f, 0x43, 0x49, 0xb8, 0x3b, 0x0c, 0x94, 0xbf, 0x18, 0x28, 0x9f, - 0x02, 0xb4, 0x0b, 0xa5, 0xcd, 0x66, 0x43, 0x33, 0xdc, 0xaa, 0xab, 0xa3, 0x76, 0xad, 0xf8, 0x58, - 0xac, 0x6f, 0x6e, 0x60, 0xcc, 0x29, 0x48, 0x86, 0x09, 0x72, 0xda, 0x21, 0x26, 0xe5, 0x33, 0x62, - 0xb2, 0x01, 0x6c, 0x94, 0x1f, 0xf2, 0x16, 0x2c, 0x28, 0xf2, 0x9f, 0xe7, 0xa0, 0x24, 0xdc, 0x71, - 0x0d, 0xbb, 0xb0, 0xad, 0xc8, 0x2e, 0xec, 0xed, 0xd1, 0xa6, 0x46, 0xe6, 0x16, 0xac, 0x1d, 0xdb, - 0x82, 0xbd, 0x33, 0x22, 0xde, 0xc5, 0xfb, 0xaf, 0x9f, 0x48, 0x30, 0x13, 0x9d, 0x94, 0xe8, 0x01, - 0x4c, 0xb1, 0x84, 0xa3, 0x76, 0xc8, 0x4e, 0x50, 0xe7, 0xfa, 0xa7, 0x23, 0xad, 0x80, 0x84, 0xc3, - 0x7c, 0xa8, 0xe7, 0x8b, 0xb1, 0x79, 0x24, 0x3a, 0x9d, 0xed, 0x52, 0x87, 0xaa, 0x5a, 0xcd, 0xbd, - 0x38, 0xa9, 0x6d, 0xea, 0x74, 0xd7, 0x6a, 0x51, 0x4b, 0xd5, 0x7b, 0x09, 0x45, 0x7c, 0x52, 0x86, - 0x91, 0xe5, 0x7f, 0x92, 0x60, 0x4a, 0x98, 0x7c, 0x0d, 0xbb, 0x8a, 0xdf, 0x8d, 0xee, 0x2a, 0xee, - 0x8e, 0xb8, 0xc0, 0xd3, 0xb7, 0x14, 0x7f, 0x1b, 0x98, 0xce, 0x96, 0x34, 0x9b, 0xd5, 0x47, 0x86, - 0x4d, 0xe3, 
0xb3, 0x9a, 0x2d, 0x46, 0xcc, 0x29, 0xc8, 0x81, 0x39, 0x35, 0x16, 0x03, 0x84, 0x6b, - 0xeb, 0xa3, 0x59, 0xe2, 0x8b, 0x35, 0x2a, 0x02, 0x7e, 0x2e, 0x4e, 0xc1, 0x09, 0x15, 0x32, 0x81, - 0x04, 0x17, 0xfa, 0x14, 0x0a, 0x47, 0x94, 0x9a, 0x29, 0x07, 0xc9, 0x43, 0x22, 0x4f, 0x60, 0x42, - 0x99, 0xf7, 0xae, 0xdd, 0x6e, 0x62, 0x0e, 0x25, 0xff, 0x6f, 0xe0, 0x8f, 0x96, 0x3b, 0xc7, 0xfd, - 0x78, 0x2a, 0x5d, 0x26, 0x9e, 0x4e, 0xa5, 0xc5, 0x52, 0xf4, 0x04, 0xf2, 0x54, 0x1b, 0x75, 0x5b, - 0x28, 0x10, 0xdb, 0x5b, 0xad, 0x20, 0x20, 0xb5, 0xb7, 0x5a, 0x98, 0x41, 0xa0, 0x5d, 0x28, 0xb2, - 0xec, 0xc3, 0x96, 0x60, 0x7e, 0xf4, 0x25, 0xcd, 0xfa, 0x1f, 0x4c, 0x08, 0xf6, 0x65, 0x63, 0x17, - 0x47, 0xfe, 0x3e, 0x4c, 0x47, 0xd6, 0x29, 0xfa, 0x0c, 0x6e, 0x6a, 0x86, 0xd2, 0x6d, 0x28, 0x9a, - 0xa2, 0x77, 0x88, 0x77, 0x6a, 0x7f, 0x37, 0x6d, 0x87, 0xb1, 0x15, 0xe2, 0x13, 0xab, 0xdc, 0xbf, - 0x7b, 0x0b, 0xd3, 0x70, 0x04, 0x51, 0x56, 0x00, 0x82, 0x3e, 0xa2, 0x2a, 0x14, 0xd9, 0x3c, 0x73, - 0xf3, 0xc9, 0x64, 0x63, 0x92, 0x59, 0xc8, 0xa6, 0x9f, 0x8d, 0xdd, 0x76, 0xb4, 0x0a, 0x60, 0x93, - 0x8e, 0x45, 0x28, 0x0f, 0x06, 0xb9, 0xe8, 0x0d, 0x64, 0xcb, 0xa7, 0xe0, 0x10, 0x97, 0xfc, 0x2f, - 0x12, 0x4c, 0xef, 0x10, 0xfa, 0x03, 0xc3, 0x3a, 0x6e, 0xf2, 0xcb, 0xe2, 0x6b, 0x08, 0xb6, 0x38, - 0x12, 0x6c, 0xdf, 0x1b, 0x32, 0x32, 0x11, 0xeb, 0xb2, 0x42, 0xae, 0xfc, 0x33, 0x09, 0x96, 0x22, - 0x9c, 0x0f, 0x83, 0xa5, 0xbb, 0x07, 0x45, 0xd3, 0xb0, 0xa8, 0x97, 0x88, 0xc7, 0x52, 0xc8, 0xc2, - 0x58, 0x28, 0x15, 0x33, 0x18, 0xec, 0xa2, 0xa1, 0x2d, 0xc8, 0x51, 0x43, 0x4c, 0xd5, 0xf1, 0x30, - 0x09, 0xb1, 0x1a, 0x20, 0x30, 0x73, 0x6d, 0x03, 0xe7, 0xa8, 0xc1, 0x06, 0xa2, 0x12, 0xe1, 0x0a, - 0x07, 0x9f, 0x2b, 0xea, 0x01, 0x86, 0xc2, 0xa1, 0x65, 0xf4, 0x2f, 0xdd, 0x07, 0x7f, 0x20, 0x1e, - 0x59, 0x46, 0x1f, 0x73, 0x2c, 0xf9, 0xe7, 0x12, 0xcc, 0x47, 0x38, 0xaf, 0x21, 0xf0, 0x7f, 0x1a, - 0x0d, 0xfc, 0xef, 0x8c, 0xd3, 0x91, 0x8c, 0xf0, 0xff, 0xf3, 0x5c, 0xac, 0x1b, 0xac, 0xc3, 0xe8, - 0x10, 0xa6, 0x4c, 0xa3, 0xdb, 0x7a, 0x05, 0xf7, 0x74, 0xb3, 0x2c, 0x6f, 0x36, 0x03, 0x2c, 0x1c, - 0x06, 0x46, 0xa7, 0x30, 0xaf, 0x2b, 0x7d, 0x62, 0x9b, 0x4a, 0x87, 0xb4, 0x5e, 0xc1, 0x01, 0xc9, - 0x6b, 0xfc, 0x22, 0x20, 0x8e, 0x88, 0x93, 0x4a, 0xd0, 0x36, 0x94, 0x54, 0x93, 0xd7, 0x71, 0xa2, - 0x76, 0x19, 0x9a, 0x45, 0xdd, 0xaa, 0xcf, 0x8d, 0xe7, 0xe2, 0x03, 0x7b, 0x18, 0xf2, 0xdf, 0xc5, - 0x67, 0x03, 0x9b, 0x7f, 0xe8, 0x31, 0x94, 0xf9, 0xb3, 0x8b, 0x8e, 0xa1, 0x79, 0x37, 0x03, 0x6c, - 0x64, 0x9b, 0xa2, 0xed, 0xe5, 0x59, 0xf5, 0xf5, 0x94, 0x43, 0x5f, 0x8f, 0x8c, 0x7d, 0x61, 0xb4, - 0x03, 0x05, 0xf3, 0xab, 0x54, 0x30, 0x3c, 0xc9, 0xf1, 0xb2, 0x85, 0xe3, 0xc8, 0x7f, 0x9c, 0x8f, - 0x99, 0xcb, 0x53, 0xdd, 0xf3, 0x57, 0x36, 0xea, 0x7e, 0xc5, 0x94, 0x39, 0xf2, 0x07, 0x50, 0x12, - 0x19, 0x5e, 0x4c, 0xe6, 0x6f, 0x8c, 0x33, 0x99, 0xc3, 0x59, 0xcc, 0xdf, 0xb0, 0x78, 0x8d, 0x1e, - 0x30, 0xfa, 0x1e, 0x4c, 0x10, 0x57, 0x85, 0x9b, 0x1b, 0x3f, 0x1c, 0x47, 0x45, 0x10, 0x57, 0x83, - 0x42, 0x55, 0xb4, 0x09, 0x54, 0xf4, 0x2d, 0xe6, 0x2f, 0xc6, 0xcb, 0x36, 0x81, 0x76, 0xa5, 0xc0, - 0xd3, 0xd5, 0x6d, 0xb7, 0xdb, 0x7e, 0xf3, 0xcb, 0xb3, 0x2a, 0x04, 0x9f, 0x38, 0x2c, 0x21, 0xff, - 0x9b, 0x04, 0xf3, 0xdc, 0x43, 0x1d, 0xc7, 0x52, 0xe9, 0xe0, 0xda, 0x12, 0xd3, 0x7e, 0x24, 0x31, - 0x7d, 0x30, 0xc4, 0x2d, 0x09, 0x0b, 0x33, 0x93, 0xd3, 0x2f, 0x24, 0x78, 0x2d, 0xc1, 0x7d, 0x0d, - 0x71, 0x71, 0x2f, 0x1a, 0x17, 0xdf, 0x1b, 0xb7, 0x43, 0x19, 0xb1, 0xf1, 0xf3, 0x9b, 0x29, 0xdd, - 0xe1, 0x2b, 0x65, 0x15, 0xc0, 0xb4, 0xd4, 0x13, 0x55, 0x23, 0x3d, 0x71, 0x3b, 0x5d, 0x0e, 0xbd, - 0x81, 0xf2, 0x29, 0x38, 0xc4, 0x85, 
0x6c, 0x58, 0xec, 0x92, 0x43, 0xc5, 0xd1, 0xe8, 0x5a, 0xb7, - 0xbb, 0xae, 0x98, 0xca, 0x81, 0xaa, 0xa9, 0x54, 0x15, 0xc7, 0x05, 0x93, 0x8d, 0x4f, 0xdc, 0x5b, - 0xe3, 0x34, 0x8e, 0x97, 0x67, 0xd5, 0xdb, 0x69, 0xb7, 0x43, 0x1e, 0xcb, 0x00, 0x67, 0x40, 0xa3, - 0x01, 0x54, 0x2c, 0xf2, 0x7d, 0x47, 0xb5, 0x48, 0x77, 0xc3, 0x32, 0xcc, 0x88, 0xda, 0x3c, 0x57, - 0xfb, 0xdb, 0xe7, 0x67, 0xd5, 0x0a, 0xce, 0xe0, 0x19, 0xae, 0x38, 0x13, 0x1e, 0x3d, 0x87, 0x05, - 0xc5, 0x7d, 0x3a, 0x16, 0xd1, 0xea, 0xae, 0x92, 0x8f, 0xce, 0xcf, 0xaa, 0x0b, 0x6b, 0x49, 0xf2, - 0x70, 0x85, 0x69, 0xa0, 0xa8, 0x0e, 0xa5, 0x13, 0x43, 0x73, 0xfa, 0xc4, 0xae, 0x14, 0x39, 0x3e, - 0x4b, 0x04, 0xa5, 0x7d, 0xb7, 0xe9, 0xe5, 0x59, 0x75, 0xe2, 0x51, 0x8b, 0xaf, 0x3e, 0x8f, 0x8b, - 0x6d, 0x28, 0x59, 0x2d, 0x29, 0x56, 0x3c, 0x3f, 0x31, 0x2e, 0x07, 0x51, 0xeb, 0x49, 0x40, 0xc2, - 0x61, 0x3e, 0xf4, 0x0c, 0x26, 0x8f, 0xc4, 0xa9, 0x84, 0x5d, 0x29, 0x8d, 0x94, 0x84, 0x23, 0xa7, - 0x18, 0x8d, 0x79, 0xa1, 0x62, 0xd2, 0x6b, 0xb6, 0x71, 0x80, 0x88, 0xde, 0x82, 0x12, 0xff, 0xd8, - 0xdc, 0xe0, 0xc7, 0x71, 0xe5, 0x20, 0xb6, 0x3d, 0x71, 0x9b, 0xb1, 0x47, 0xf7, 0x58, 0x37, 0x9b, - 0xeb, 0xfc, 0x58, 0x38, 0xc6, 0xba, 0xd9, 0x5c, 0xc7, 0x1e, 0x1d, 0x7d, 0x06, 0x25, 0x9b, 0x6c, - 0xa9, 0xba, 0x73, 0x5a, 0x81, 0x91, 0x2e, 0x95, 0x5b, 0x0f, 0x39, 0x77, 0xec, 0x60, 0x2c, 0xd0, - 0x20, 0xe8, 0xd8, 0x83, 0x45, 0x47, 0x30, 0x69, 0x39, 0xfa, 0x9a, 0xbd, 0x67, 0x13, 0xab, 0x32, - 0xc5, 0x75, 0x0c, 0x0b, 0xe7, 0xd8, 0xe3, 0x8f, 0x6b, 0xf1, 0x3d, 0xe4, 0x73, 0xe0, 0x00, 0x1c, - 0xfd, 0xa9, 0x04, 0xc8, 0x76, 0x4c, 0x53, 0x23, 0x7d, 0xa2, 0x53, 0x45, 0xe3, 0x67, 0x71, 0x76, - 0xe5, 0x26, 0xd7, 0xf9, 0x3b, 0xc3, 0xfa, 0x95, 0x10, 0x8c, 0x2b, 0xf7, 0x0f, 0xbd, 0x93, 0xac, - 0x38, 0x45, 0x2f, 0x73, 0xed, 0xa1, 0xcd, 0x7f, 0x57, 0xa6, 0x47, 0x72, 0x6d, 0xfa, 0x99, 0x63, - 0xe0, 0x5a, 0x41, 0xc7, 0x1e, 0x2c, 0xda, 0x87, 0x45, 0x8b, 0x28, 0xdd, 0x5d, 0x5d, 0x1b, 0x60, - 0xc3, 0xa0, 0x8f, 0x54, 0x8d, 0xd8, 0x03, 0x9b, 0x92, 0x7e, 0x65, 0x86, 0x0f, 0xbb, 0xff, 0x28, - 0x03, 0xa7, 0x72, 0xe1, 0x0c, 0x69, 0xd4, 0x87, 0xaa, 0x17, 0x32, 0xd8, 0x7a, 0xf2, 0x63, 0xd6, - 0x43, 0xbb, 0xa3, 0x68, 0xee, 0x3d, 0xc0, 0x2c, 0x57, 0xf0, 0xe6, 0xf9, 0x59, 0xb5, 0xba, 0x71, - 0x31, 0x2b, 0x1e, 0x86, 0x85, 0xbe, 0x0b, 0x15, 0x25, 0x4b, 0xcf, 0x1c, 0xd7, 0xf3, 0x06, 0x8b, - 0x43, 0x99, 0x0a, 0x32, 0xa5, 0x11, 0x85, 0x39, 0x25, 0xfa, 0x42, 0xd5, 0xae, 0xcc, 0x8f, 0x74, - 0x10, 0x19, 0x7b, 0xd8, 0x1a, 0x1c, 0x46, 0xc4, 0x08, 0x36, 0x4e, 0x68, 0xe0, 0xcf, 0x27, 0xc4, - 0x61, 0xfa, 0xf5, 0xbc, 0x57, 0x1c, 0xef, 0xf9, 0x44, 0x60, 0xda, 0x2b, 0x7b, 0x3e, 0x11, 0x82, - 0xbc, 0xf8, 0xf8, 0xee, 0xbf, 0x73, 0xb0, 0x10, 0x30, 0x8f, 0xfc, 0x7c, 0x22, 0x45, 0xe4, 0xca, - 0x9e, 0x4f, 0xa4, 0xbf, 0x3f, 0xc8, 0x5f, 0xf5, 0xfb, 0x83, 0x2b, 0x78, 0xb6, 0xc1, 0x9f, 0x34, - 0x04, 0xae, 0xfb, 0xff, 0xf7, 0xa4, 0x21, 0xb0, 0x2d, 0xa3, 0xc8, 0xfa, 0x87, 0x5c, 0xb8, 0x03, - 0xbf, 0xf6, 0xf7, 0xea, 0x5f, 0xfd, 0x51, 0xa7, 0xfc, 0x8b, 0x3c, 0xcc, 0xc5, 0x57, 0x63, 0xe4, - 0xfa, 0x55, 0x1a, 0x7a, 0xfd, 0xda, 0x84, 0x5b, 0x87, 0x8e, 0xa6, 0x0d, 0xb8, 0x1b, 0x42, 0x77, - 0xb0, 0xee, 0xf5, 0xc9, 0x1b, 0x42, 0xf2, 0xd6, 0xa3, 0x14, 0x1e, 0x9c, 0x2a, 0x99, 0x71, 0x95, - 0x9c, 0xbf, 0xd4, 0x55, 0x72, 0xe2, 0x66, 0xb3, 0x30, 0xc6, 0xcd, 0x66, 0xea, 0xb5, 0x70, 0xf1, - 0x12, 0xd7, 0xc2, 0x97, 0xb9, 0xc7, 0x4d, 0x09, 0x62, 0xc3, 0xee, 0x71, 0xe5, 0x37, 0x60, 0x59, - 0x88, 0x51, 0x7e, 0xc5, 0xaa, 0x53, 0xcb, 0xd0, 0x34, 0x62, 0x6d, 0x38, 0xfd, 0xfe, 0x40, 0xfe, - 0x26, 0xcc, 0x44, 0x1f, 0x0f, 0xb8, 0x23, 0xed, 0xbe, 0x5f, 
0x10, 0x97, 0x58, 0xa1, 0x91, 0x76, - 0xdb, 0xb1, 0xcf, 0x21, 0xff, 0x50, 0x82, 0xc5, 0xf4, 0x47, 0x82, 0x48, 0x83, 0x99, 0xbe, 0x72, - 0x1a, 0x7e, 0x51, 0x29, 0x5d, 0xf2, 0x78, 0x81, 0xdf, 0x1a, 0x6f, 0x47, 0xb0, 0x70, 0x0c, 0x5b, - 0xfe, 0x52, 0x82, 0xa5, 0x8c, 0xfb, 0xda, 0xeb, 0xb5, 0x04, 0x3d, 0x85, 0x72, 0x5f, 0x39, 0x6d, - 0x39, 0x56, 0x8f, 0x5c, 0xfa, 0x40, 0x85, 0x47, 0x8c, 0x6d, 0x81, 0x82, 0x7d, 0x3c, 0xf9, 0xc7, - 0x12, 0x54, 0xb2, 0x4a, 0x5b, 0xf4, 0x20, 0x72, 0xb3, 0xfc, 0xf5, 0xd8, 0xcd, 0xf2, 0x7c, 0x42, - 0xee, 0x8a, 0xee, 0x95, 0xff, 0x5e, 0x82, 0xc5, 0xf4, 0x12, 0x1f, 0xbd, 0x1f, 0xb1, 0xb0, 0x1a, - 0xb3, 0x70, 0x36, 0x26, 0x25, 0xec, 0xfb, 0x1e, 0xcc, 0x88, 0x8d, 0x80, 0x80, 0x11, 0x5e, 0x95, - 0xd3, 0x62, 0xa5, 0x80, 0xf0, 0x0a, 0x5f, 0x3e, 0x5e, 0xd1, 0x36, 0x1c, 0x43, 0x93, 0xff, 0x24, - 0x07, 0xc5, 0x56, 0x47, 0xd1, 0xc8, 0x35, 0x94, 0x59, 0xdf, 0x8e, 0x94, 0x59, 0xc3, 0xfe, 0xfd, - 0xc0, 0xad, 0xca, 0xac, 0xb0, 0x70, 0xac, 0xc2, 0x7a, 0x7b, 0x24, 0xb4, 0x8b, 0x8b, 0xab, 0xdf, - 0x82, 0x49, 0x5f, 0xe9, 0x78, 0x31, 0x5f, 0xfe, 0x9b, 0x1c, 0x4c, 0x85, 0x54, 0x8c, 0x99, 0x31, - 0x0e, 0x23, 0x99, 0x76, 0x94, 0xff, 0x41, 0x85, 0x74, 0xd5, 0xbc, 0xdc, 0xea, 0x3e, 0x12, 0x0c, - 0x9e, 0x85, 0x25, 0x53, 0xee, 0x37, 0x61, 0x86, 0xf2, 0xff, 0x09, 0xf9, 0xc7, 0x90, 0x79, 0x3e, - 0x17, 0xfd, 0xa7, 0xa5, 0xed, 0x08, 0x15, 0xc7, 0xb8, 0x97, 0x3f, 0x81, 0xe9, 0x88, 0xb2, 0xb1, - 0xde, 0xf8, 0xfd, 0xa3, 0x04, 0x5f, 0x1f, 0xba, 0x49, 0x44, 0x8d, 0xc8, 0x22, 0xa9, 0xc5, 0x16, - 0xc9, 0x4a, 0x36, 0xc0, 0x15, 0xbe, 0x15, 0xf9, 0x61, 0x0e, 0x50, 0xfb, 0x48, 0xb5, 0xba, 0x4d, - 0xc5, 0xa2, 0x03, 0x2c, 0xfe, 0xec, 0x75, 0x0d, 0x0b, 0xe6, 0x01, 0x4c, 0x75, 0x89, 0xdd, 0xb1, - 0x54, 0xee, 0x1c, 0x51, 0x9d, 0xfb, 0x07, 0x29, 0x1b, 0x01, 0x09, 0x87, 0xf9, 0xd0, 0x77, 0xa0, - 0x7c, 0xe2, 0xfe, 0x09, 0xd1, 0x3b, 0x9c, 0x1d, 0x56, 0x48, 0x06, 0x7f, 0x5b, 0x0c, 0xe6, 0x8f, - 0x68, 0xb0, 0xb1, 0x0f, 0x26, 0x7f, 0x2e, 0xc1, 0x62, 0xd2, 0x11, 0x1b, 0xcc, 0xd4, 0xab, 0x77, - 0xc6, 0x1b, 0x50, 0xe0, 0xe8, 0xcc, 0x0b, 0x37, 0xdd, 0x43, 0x77, 0xa6, 0x19, 0xf3, 0x56, 0xf9, - 0x3f, 0x24, 0x58, 0x4e, 0x37, 0xed, 0x1a, 0xca, 0xf6, 0xa7, 0xd1, 0xb2, 0x7d, 0xd8, 0x39, 0x45, - 0xba, 0x9d, 0x19, 0x25, 0xfc, 0xbf, 0xa7, 0xfa, 0xfc, 0x1a, 0x3a, 0xb5, 0x1f, 0xed, 0xd4, 0xfd, - 0xb1, 0x3b, 0x95, 0xde, 0xa1, 0xc6, 0xbb, 0x2f, 0xbe, 0x58, 0xb9, 0xf1, 0xcb, 0x2f, 0x56, 0x6e, - 0xfc, 0xea, 0x8b, 0x95, 0x1b, 0x7f, 0x74, 0xbe, 0x22, 0xbd, 0x38, 0x5f, 0x91, 0x7e, 0x79, 0xbe, - 0x22, 0xfd, 0xea, 0x7c, 0x45, 0xfa, 0xaf, 0xf3, 0x15, 0xe9, 0x2f, 0xbe, 0x5c, 0xb9, 0xf1, 0xb4, - 0x24, 0x70, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x55, 0xc4, 0x0b, 0x53, 0x44, 0x3d, 0x00, 0x00, + // 3571 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, + 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x28, 0x93, 0x63, 0xca, 0xe2, 0xc8, 0x6d, + 0x40, 0x91, 0x1c, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0x3b, 0x1c, 0x52, 0x1f, 0x74, 0xf8, + 0xa5, 0x1a, 0x52, 0x71, 0x8c, 0xc8, 0x71, 0x73, 0xa6, 0x38, 0x6c, 0xb1, 0xa7, 0xbb, 0xdd, 0x5d, + 0x4d, 0x73, 0x80, 0x20, 0xc8, 0x21, 0x08, 0x10, 0x20, 0x41, 0x92, 0x83, 0xf3, 0x71, 0x8b, 0x2f, + 0x39, 0x25, 0x48, 0x6e, 0xc9, 0xc1, 0x30, 0x10, 0xc0, 0x0b, 0x08, 0x0b, 0x2f, 0xe0, 0xdb, 0xfa, + 0x44, 0xac, 0xe9, 0xd3, 0x62, 0xff, 0x81, 0x85, 0x0e, 0x8b, 0x45, 0x55, 0x57, 0x7f, 0x77, 0x73, + 0x66, 0x68, 0x89, 0x58, 0x2c, 0xf6, 0xc6, 0xa9, 0xf7, 0xde, 0xef, 0xbd, 0x7a, 0xf5, 0xea, 0xbd, + 0xd7, 0x55, 0x45, 0xb8, 0xb7, 0x77, 
0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xcf, 0xd9, 0x26, 0x96, 0x4e, + 0x28, 0xb1, 0x6b, 0xfb, 0x44, 0x6f, 0x19, 0x56, 0x4d, 0x10, 0x14, 0x53, 0xad, 0x91, 0x03, 0x4a, + 0x74, 0x5b, 0x35, 0x74, 0xbb, 0xb6, 0x7f, 0x7d, 0x9b, 0x50, 0xe5, 0x7a, 0xad, 0x4d, 0x74, 0x62, + 0x29, 0x94, 0xb4, 0xaa, 0xa6, 0x65, 0x50, 0x03, 0x5d, 0x70, 0xd9, 0xab, 0x8a, 0xa9, 0x56, 0x03, + 0xf6, 0xaa, 0x60, 0x9f, 0xbb, 0xd6, 0x56, 0xe9, 0xae, 0xb3, 0x5d, 0x6d, 0x1a, 0x9d, 0x5a, 0xdb, + 0x68, 0x1b, 0x35, 0x2e, 0xb5, 0xed, 0xec, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x45, 0x9b, 0x93, + 0x43, 0xca, 0x9b, 0x86, 0x45, 0x6a, 0xfb, 0x09, 0x8d, 0x73, 0x57, 0x42, 0x3c, 0xa6, 0xa1, 0xa9, + 0xcd, 0x6e, 0x96, 0x71, 0x73, 0x6f, 0x05, 0xac, 0x1d, 0xa5, 0xb9, 0xab, 0xea, 0xc4, 0xea, 0xd6, + 0xcc, 0xbd, 0x36, 0x97, 0xb5, 0x88, 0x6d, 0x38, 0x56, 0x93, 0x0c, 0x24, 0x65, 0xd7, 0x3a, 0x84, + 0x2a, 0x69, 0x66, 0xd5, 0xb2, 0xa4, 0x2c, 0x47, 0xa7, 0x6a, 0x27, 0xa9, 0xe6, 0x56, 0x2f, 0x01, + 0xbb, 0xb9, 0x4b, 0x3a, 0x4a, 0x42, 0xee, 0xcd, 0x2c, 0x39, 0x87, 0xaa, 0x5a, 0x4d, 0xd5, 0xa9, + 0x4d, 0xad, 0xb8, 0x90, 0x7c, 0x07, 0xa6, 0x16, 0x34, 0xcd, 0xf8, 0x94, 0xb4, 0xee, 0x69, 0xe4, + 0xe0, 0x91, 0xa1, 0x39, 0x1d, 0x82, 0x2e, 0xc1, 0x70, 0xcb, 0x52, 0xf7, 0x89, 0x55, 0x96, 0x2e, + 0x4a, 0x97, 0x47, 0xea, 0xe3, 0x4f, 0x0f, 0x2b, 0x67, 0x8e, 0x0e, 0x2b, 0xc3, 0x4b, 0x7c, 0x14, + 0x0b, 0xaa, 0x7c, 0x17, 0x26, 0x84, 0xf0, 0x03, 0xc3, 0xa6, 0x1b, 0x0a, 0xdd, 0x45, 0x37, 0x00, + 0x4c, 0x85, 0xee, 0x6e, 0x58, 0x64, 0x47, 0x3d, 0x10, 0xe2, 0x48, 0x88, 0xc3, 0x86, 0x4f, 0xc1, + 0x21, 0x2e, 0xf9, 0xdf, 0x24, 0x78, 0x79, 0xd1, 0xb1, 0xa9, 0xd1, 0x59, 0x25, 0xd4, 0x52, 0x9b, + 0x8b, 0x8e, 0x65, 0x11, 0x9d, 0x36, 0xa8, 0x42, 0x1d, 0x1b, 0x5d, 0x84, 0x82, 0xae, 0x74, 0x88, + 0xc0, 0x3a, 0x2b, 0xb0, 0x0a, 0x6b, 0x4a, 0x87, 0x60, 0x4e, 0x41, 0x1f, 0xc2, 0xd0, 0xbe, 0xa2, + 0x39, 0xa4, 0x9c, 0xbb, 0x28, 0x5d, 0x1e, 0xbd, 0x51, 0xad, 0x06, 0xa1, 0xe7, 0x3b, 0xa2, 0x6a, + 0xee, 0xb5, 0x79, 0x2c, 0x7a, 0xab, 0x5b, 0x7d, 0xe8, 0x28, 0x3a, 0x55, 0x69, 0xb7, 0x7e, 0x4e, + 0x40, 0x9e, 0x15, 0x7a, 0x1f, 0x31, 0x2c, 0xec, 0x42, 0xca, 0x7f, 0x09, 0x17, 0x32, 0x4d, 0x5b, + 0x51, 0x6d, 0x8a, 0x1e, 0xc3, 0x90, 0x4a, 0x49, 0xc7, 0x2e, 0x4b, 0x17, 0xf3, 0x97, 0x47, 0x6f, + 0xdc, 0xae, 0x1e, 0x1b, 0xf7, 0xd5, 0x4c, 0xb0, 0xfa, 0x98, 0x30, 0x63, 0x68, 0x99, 0xc1, 0x61, + 0x17, 0x55, 0xfe, 0x27, 0x09, 0x50, 0x58, 0x66, 0x53, 0xb1, 0xda, 0x84, 0xf6, 0xe1, 0x94, 0x3f, + 0xfd, 0x61, 0x4e, 0x99, 0x16, 0x90, 0xa3, 0xae, 0xc2, 0x88, 0x4f, 0x4c, 0x98, 0x49, 0x9a, 0xc4, + 0x9d, 0xf1, 0x28, 0xea, 0x8c, 0xeb, 0x03, 0x38, 0xc3, 0x45, 0xc9, 0xf0, 0xc2, 0x67, 0x39, 0x18, + 0x59, 0x52, 0x48, 0xc7, 0xd0, 0x1b, 0x84, 0xa2, 0x8f, 0xa1, 0xc4, 0x36, 0x5b, 0x4b, 0xa1, 0x0a, + 0x77, 0xc0, 0xe8, 0x8d, 0x37, 0x8e, 0x9b, 0x9d, 0x5d, 0x65, 0xdc, 0xd5, 0xfd, 0xeb, 0xd5, 0xf5, + 0xed, 0x27, 0xa4, 0x49, 0x57, 0x09, 0x55, 0x82, 0x98, 0x0c, 0xc6, 0xb0, 0x8f, 0x8a, 0xd6, 0xa0, + 0x60, 0x9b, 0xa4, 0x29, 0x7c, 0x77, 0xb5, 0xc7, 0x34, 0x7c, 0xcb, 0x1a, 0x26, 0x69, 0x06, 0x8b, + 0xc1, 0x7e, 0x61, 0x8e, 0x83, 0x1e, 0xc1, 0xb0, 0xcd, 0x57, 0xb9, 0x9c, 0x4f, 0xac, 0xc6, 0xf1, + 0x88, 0x6e, 0x6c, 0xf8, 0x1b, 0xd0, 0xfd, 0x8d, 0x05, 0x9a, 0xfc, 0xf3, 0x1c, 0x20, 0x9f, 0x77, + 0xd1, 0xd0, 0x5b, 0x2a, 0x55, 0x0d, 0x1d, 0xbd, 0x03, 0x05, 0xda, 0x35, 0xbd, 0xe8, 0xb8, 0xe4, + 0x19, 0xb4, 0xd9, 0x35, 0xc9, 0xb3, 0xc3, 0xca, 0x4c, 0x52, 0x82, 0x51, 0x30, 0x97, 0x41, 0x2b, + 0xbe, 0xa9, 0x39, 0x2e, 0xfd, 0x56, 0x54, 0xf5, 0xb3, 0xc3, 0x4a, 0x4a, 0x2e, 0xae, 0xfa, 0x48, + 0x51, 0x03, 0xd1, 0x3e, 0x20, 0x4d, 0xb1, 0xe9, 0xa6, 0xa5, 
0xe8, 0xb6, 0xab, 0x49, 0xed, 0x10, + 0xe1, 0x84, 0xd7, 0xfb, 0x5b, 0x34, 0x26, 0x51, 0x9f, 0x13, 0x56, 0xa0, 0x95, 0x04, 0x1a, 0x4e, + 0xd1, 0xc0, 0x32, 0x98, 0x45, 0x14, 0xdb, 0xd0, 0xcb, 0x85, 0x68, 0x06, 0xc3, 0x7c, 0x14, 0x0b, + 0x2a, 0xba, 0x02, 0xc5, 0x0e, 0xb1, 0x6d, 0xa5, 0x4d, 0xca, 0x43, 0x9c, 0x71, 0x42, 0x30, 0x16, + 0x57, 0xdd, 0x61, 0xec, 0xd1, 0xe5, 0x2f, 0x24, 0x18, 0xf3, 0x3d, 0xc7, 0xa3, 0xfd, 0xcf, 0x12, + 0x71, 0x58, 0xed, 0x6f, 0x4a, 0x4c, 0x9a, 0x47, 0xe1, 0xa4, 0xd0, 0x56, 0xf2, 0x46, 0x42, 0x31, + 0xb8, 0xea, 0xed, 0xa5, 0x1c, 0xdf, 0x4b, 0x97, 0xfb, 0x0d, 0x99, 0x8c, 0x2d, 0xf4, 0xcf, 0x85, + 0x90, 0xf9, 0x2c, 0x34, 0xd1, 0x63, 0x28, 0xd9, 0x44, 0x23, 0x4d, 0x6a, 0x58, 0xc2, 0xfc, 0x37, + 0xfb, 0x34, 0x5f, 0xd9, 0x26, 0x5a, 0x43, 0x88, 0xd6, 0xcf, 0x32, 0xfb, 0xbd, 0x5f, 0xd8, 0x87, + 0x44, 0x0f, 0xa1, 0x44, 0x49, 0xc7, 0xd4, 0x14, 0xea, 0xe5, 0xa0, 0xd7, 0xc2, 0x53, 0x60, 0x91, + 0xc3, 0xc0, 0x36, 0x8c, 0xd6, 0xa6, 0x60, 0xe3, 0xdb, 0xc7, 0x77, 0x89, 0x37, 0x8a, 0x7d, 0x18, + 0xb4, 0x0f, 0xe3, 0x8e, 0xd9, 0x62, 0x9c, 0x94, 0xd5, 0xb0, 0x76, 0x57, 0x44, 0xd2, 0xad, 0x7e, + 0x7d, 0xb3, 0x15, 0x91, 0xae, 0xcf, 0x08, 0x5d, 0xe3, 0xd1, 0x71, 0x1c, 0xd3, 0x82, 0x16, 0x60, + 0xa2, 0xa3, 0xea, 0x98, 0x28, 0xad, 0x6e, 0x83, 0x34, 0x0d, 0xbd, 0x65, 0xf3, 0xb0, 0x1a, 0xaa, + 0xcf, 0x0a, 0x80, 0x89, 0xd5, 0x28, 0x19, 0xc7, 0xf9, 0xd1, 0xfb, 0x80, 0xbc, 0x69, 0xdc, 0x77, + 0x4b, 0xb0, 0x6a, 0xe8, 0x3c, 0xe6, 0xf2, 0x41, 0x70, 0x6f, 0x26, 0x38, 0x70, 0x8a, 0x14, 0x5a, + 0x81, 0x73, 0x16, 0xd9, 0x57, 0xd9, 0x1c, 0x1f, 0xa8, 0x36, 0x35, 0xac, 0xee, 0x8a, 0xda, 0x51, + 0x69, 0x79, 0x98, 0xdb, 0x54, 0x3e, 0x3a, 0xac, 0x9c, 0xc3, 0x29, 0x74, 0x9c, 0x2a, 0x25, 0xff, + 0xcb, 0x30, 0x4c, 0xc4, 0xf2, 0x0d, 0x7a, 0x04, 0x33, 0x4d, 0xb7, 0x38, 0xad, 0x39, 0x9d, 0x6d, + 0x62, 0x35, 0x9a, 0xbb, 0xa4, 0xe5, 0x68, 0xa4, 0xc5, 0x03, 0x65, 0xa8, 0x3e, 0x2f, 0x2c, 0x9e, + 0x59, 0x4c, 0xe5, 0xc2, 0x19, 0xd2, 0xcc, 0x0b, 0x3a, 0x1f, 0x5a, 0x55, 0x6d, 0xdb, 0xc7, 0xcc, + 0x71, 0x4c, 0xdf, 0x0b, 0x6b, 0x09, 0x0e, 0x9c, 0x22, 0xc5, 0x6c, 0x6c, 0x11, 0x5b, 0xb5, 0x48, + 0x2b, 0x6e, 0x63, 0x3e, 0x6a, 0xe3, 0x52, 0x2a, 0x17, 0xce, 0x90, 0x46, 0x37, 0x61, 0xd4, 0xd5, + 0xc6, 0xd7, 0x4f, 0x2c, 0xb4, 0x5f, 0x0e, 0xd7, 0x02, 0x12, 0x0e, 0xf3, 0xb1, 0xa9, 0x19, 0xdb, + 0x36, 0xb1, 0xf6, 0x49, 0x2b, 0x7b, 0x81, 0xd7, 0x13, 0x1c, 0x38, 0x45, 0x8a, 0x4d, 0xcd, 0x8d, + 0xc0, 0xc4, 0xd4, 0x86, 0xa3, 0x53, 0xdb, 0x4a, 0xe5, 0xc2, 0x19, 0xd2, 0x2c, 0x8e, 0x5d, 0x93, + 0x17, 0xf6, 0x15, 0x55, 0x53, 0xb6, 0x35, 0x52, 0x2e, 0x46, 0xe3, 0x78, 0x2d, 0x4a, 0xc6, 0x71, + 0x7e, 0x74, 0x1f, 0xa6, 0xdc, 0xa1, 0x2d, 0x5d, 0xf1, 0x41, 0x4a, 0x1c, 0xe4, 0x65, 0x01, 0x32, + 0xb5, 0x16, 0x67, 0xc0, 0x49, 0x19, 0xf4, 0x0e, 0x8c, 0x37, 0x0d, 0x4d, 0xe3, 0xf1, 0xb8, 0x68, + 0x38, 0x3a, 0x2d, 0x8f, 0x70, 0x14, 0xc4, 0xf6, 0xe3, 0x62, 0x84, 0x82, 0x63, 0x9c, 0x88, 0x00, + 0x34, 0xbd, 0x82, 0x63, 0x97, 0xa1, 0xaf, 0x5e, 0x23, 0x59, 0xf4, 0x82, 0x1e, 0xc0, 0x1f, 0xb2, + 0x71, 0x08, 0x58, 0xfe, 0xb1, 0x04, 0xb3, 0x19, 0xa9, 0x03, 0xbd, 0x17, 0x29, 0xb1, 0xbf, 0x1f, + 0x2b, 0xb1, 0xe7, 0x33, 0xc4, 0x42, 0x75, 0x56, 0x87, 0x31, 0x8b, 0xcd, 0x4a, 0x6f, 0xbb, 0x2c, + 0x22, 0x47, 0xde, 0xec, 0x31, 0x0d, 0x1c, 0x96, 0x09, 0x72, 0xfe, 0xd4, 0xd1, 0x61, 0x65, 0x2c, + 0x42, 0xc3, 0x51, 0x78, 0xf9, 0x5f, 0x73, 0x00, 0x4b, 0xc4, 0xd4, 0x8c, 0x6e, 0x87, 0xe8, 0xa7, + 0xd1, 0x43, 0xad, 0x47, 0x7a, 0xa8, 0x6b, 0xbd, 0x96, 0xc7, 0x37, 0x2d, 0xb3, 0x89, 0xfa, 0x93, + 0x58, 0x13, 0x55, 0xeb, 0x1f, 0xf2, 0xf8, 0x2e, 0xea, 0xa7, 0x79, 0x98, 0x0e, 0x98, 
0x83, 0x36, + 0xea, 0x4e, 0x64, 0x8d, 0x7f, 0x2f, 0xb6, 0xc6, 0xb3, 0x29, 0x22, 0x2f, 0xac, 0x8f, 0x7a, 0xfe, + 0xfd, 0x0c, 0x7a, 0x02, 0xe3, 0xac, 0x71, 0x72, 0xc3, 0x83, 0xb7, 0x65, 0xc3, 0x03, 0xb7, 0x65, + 0x7e, 0x01, 0x5d, 0x89, 0x20, 0xe1, 0x18, 0x72, 0x46, 0x1b, 0x58, 0x7c, 0xd1, 0x6d, 0xa0, 0xfc, + 0xa5, 0x04, 0xe3, 0xc1, 0x32, 0x9d, 0x42, 0xd3, 0xb6, 0x16, 0x6d, 0xda, 0xae, 0xf4, 0x1d, 0xa2, + 0x19, 0x5d, 0xdb, 0x2f, 0x59, 0x83, 0xef, 0x33, 0xb1, 0x0d, 0xbe, 0xad, 0x34, 0xf7, 0xfa, 0xf8, + 0xfc, 0xfb, 0x4c, 0x02, 0x24, 0xaa, 0xc0, 0x82, 0xae, 0x1b, 0x54, 0x71, 0x73, 0xa5, 0x6b, 0xd6, + 0x72, 0xdf, 0x66, 0x79, 0x1a, 0xab, 0x5b, 0x09, 0xac, 0xbb, 0x3a, 0xb5, 0xba, 0xc1, 0x8a, 0x24, + 0x19, 0x70, 0x8a, 0x01, 0x48, 0x01, 0xb0, 0x04, 0xe6, 0xa6, 0x21, 0x36, 0xf2, 0xb5, 0x3e, 0x72, + 0x1e, 0x13, 0x58, 0x34, 0xf4, 0x1d, 0xb5, 0x1d, 0xa4, 0x1d, 0xec, 0x03, 0xe1, 0x10, 0xe8, 0xdc, + 0x5d, 0x98, 0xcd, 0xb0, 0x16, 0x4d, 0x42, 0x7e, 0x8f, 0x74, 0x5d, 0xb7, 0x61, 0xf6, 0x27, 0x3a, + 0x17, 0xfe, 0x4c, 0x1e, 0x11, 0x5f, 0xb8, 0xef, 0xe4, 0x6e, 0x4b, 0xf2, 0x17, 0x43, 0xe1, 0xd8, + 0xe1, 0x1d, 0xf3, 0x65, 0x28, 0x59, 0xc4, 0xd4, 0xd4, 0xa6, 0x62, 0x8b, 0x46, 0x88, 0x37, 0xbf, + 0x58, 0x8c, 0x61, 0x9f, 0x1a, 0xe9, 0xad, 0x73, 0x2f, 0xb6, 0xb7, 0xce, 0x3f, 0x9f, 0xde, 0xfa, + 0xcf, 0xa1, 0x64, 0x7b, 0x5d, 0x75, 0x81, 0x43, 0x5e, 0x1f, 0x20, 0xbf, 0x8a, 0x86, 0xda, 0x57, + 0xe0, 0xb7, 0xd2, 0x3e, 0x68, 0x5a, 0x13, 0x3d, 0x34, 0x60, 0x13, 0xfd, 0x5c, 0x1b, 0x5f, 0x96, + 0x53, 0x4d, 0xc5, 0xb1, 0x49, 0x8b, 0x27, 0xa2, 0x52, 0x90, 0x53, 0x37, 0xf8, 0x28, 0x16, 0x54, + 0xf4, 0x38, 0x12, 0xb2, 0xa5, 0x93, 0x84, 0xec, 0x78, 0x76, 0xb8, 0xa2, 0x2d, 0x98, 0x35, 0x2d, + 0xa3, 0x6d, 0x11, 0xdb, 0x5e, 0x22, 0x4a, 0x4b, 0x53, 0x75, 0xe2, 0xf9, 0xc7, 0xed, 0x88, 0xce, + 0x1f, 0x1d, 0x56, 0x66, 0x37, 0xd2, 0x59, 0x70, 0x96, 0xac, 0xfc, 0xb4, 0x00, 0x93, 0xf1, 0x0a, + 0x98, 0xd1, 0xa4, 0x4a, 0x27, 0x6a, 0x52, 0xaf, 0x86, 0x36, 0x83, 0xdb, 0xc1, 0xfb, 0xab, 0x9f, + 0xb2, 0x21, 0x16, 0x60, 0x42, 0x64, 0x03, 0x8f, 0x28, 0xda, 0x74, 0x7f, 0xf5, 0xb7, 0xa2, 0x64, + 0x1c, 0xe7, 0x67, 0xad, 0x67, 0xd0, 0x51, 0x7a, 0x20, 0x85, 0x68, 0xeb, 0xb9, 0x10, 0x67, 0xc0, + 0x49, 0x19, 0xb4, 0x0a, 0xd3, 0x8e, 0x9e, 0x84, 0x72, 0xa3, 0xf1, 0xbc, 0x80, 0x9a, 0xde, 0x4a, + 0xb2, 0xe0, 0x34, 0x39, 0xb4, 0x13, 0xe9, 0x46, 0x87, 0x79, 0x86, 0xbd, 0xd1, 0xf7, 0xde, 0xe9, + 0xbb, 0x1d, 0x45, 0x77, 0x60, 0xcc, 0xe2, 0xdf, 0x1d, 0x9e, 0xc1, 0x6e, 0xef, 0xfe, 0x92, 0x10, + 0x1b, 0xc3, 0x61, 0x22, 0x8e, 0xf2, 0xa6, 0xb4, 0xdb, 0xa5, 0x7e, 0xdb, 0x6d, 0xf9, 0xff, 0xa5, + 0x70, 0x11, 0xf2, 0x5b, 0xe0, 0x5e, 0xa7, 0x4c, 0x09, 0x89, 0x50, 0x77, 0x64, 0xa4, 0x77, 0xbf, + 0xb7, 0x06, 0xea, 0x7e, 0x83, 0xe2, 0xd9, 0xbb, 0xfd, 0xfd, 0x5c, 0x82, 0x99, 0x7b, 0x8d, 0xfb, + 0x96, 0xe1, 0x98, 0x9e, 0x39, 0xeb, 0xa6, 0xeb, 0xd7, 0xb7, 0xa1, 0x60, 0x39, 0x9a, 0x37, 0x8f, + 0xd7, 0xbc, 0x79, 0x60, 0x47, 0x63, 0xf3, 0x98, 0x8e, 0x49, 0xb9, 0x93, 0x60, 0x02, 0x68, 0x0d, + 0x86, 0x2d, 0x45, 0x6f, 0x13, 0xaf, 0xac, 0x5e, 0xea, 0x61, 0xfd, 0xf2, 0x12, 0x66, 0xec, 0xa1, + 0xe6, 0x8d, 0x4b, 0x63, 0x81, 0x22, 0xff, 0xbd, 0x04, 0x13, 0x0f, 0x36, 0x37, 0x37, 0x96, 0x75, + 0xbe, 0xa3, 0xf9, 0x79, 0xfa, 0x45, 0x28, 0x98, 0x0a, 0xdd, 0x8d, 0x57, 0x7a, 0x46, 0xc3, 0x9c, + 0x82, 0x3e, 0x80, 0x22, 0xcb, 0x24, 0x44, 0x6f, 0xf5, 0xd9, 0x6a, 0x0b, 0xf8, 0xba, 0x2b, 0x14, + 0x74, 0x88, 0x62, 0x00, 0x7b, 0x70, 0xf2, 0x1e, 0x9c, 0x0b, 0x99, 0xc3, 0xfc, 0xc1, 0x8f, 0x81, + 0x51, 0x03, 0x86, 0x98, 0x66, 0xef, 0x94, 0xb7, 0xd7, 0x61, 0x66, 0x6c, 0x4a, 0x41, 0xa7, 0xc3, + 0x7e, 0xd9, 
0xd8, 0xc5, 0x92, 0x57, 0x61, 0x8c, 0x5f, 0x22, 0x18, 0x16, 0xe5, 0x6e, 0x41, 0x17, + 0x20, 0xdf, 0x51, 0x75, 0x51, 0x67, 0x47, 0x85, 0x4c, 0x9e, 0xd5, 0x08, 0x36, 0xce, 0xc9, 0xca, + 0x81, 0xc8, 0x3c, 0x01, 0x59, 0x39, 0xc0, 0x6c, 0x5c, 0xbe, 0x0f, 0x45, 0xe1, 0xee, 0x30, 0x50, + 0xfe, 0x78, 0xa0, 0x7c, 0x0a, 0xd0, 0x3a, 0x14, 0x97, 0x37, 0xea, 0x9a, 0xe1, 0x76, 0x5d, 0x4d, + 0xb5, 0x65, 0xc5, 0xd7, 0x62, 0x71, 0x79, 0x09, 0x63, 0x4e, 0x41, 0x32, 0x0c, 0x93, 0x83, 0x26, + 0x31, 0x29, 0x8f, 0x88, 0x91, 0x3a, 0xb0, 0x55, 0xbe, 0xcb, 0x47, 0xb0, 0xa0, 0xc8, 0xff, 0x90, + 0x83, 0xa2, 0x70, 0xc7, 0x29, 0x7c, 0x85, 0xad, 0x44, 0xbe, 0xc2, 0x5e, 0xef, 0x2f, 0x34, 0x32, + 0x3f, 0xc1, 0x36, 0x63, 0x9f, 0x60, 0x57, 0xfb, 0xc4, 0x3b, 0xfe, 0xfb, 0xeb, 0x7f, 0x24, 0x18, + 0x8f, 0x06, 0x25, 0xba, 0x09, 0xa3, 0xac, 0xe0, 0xa8, 0x4d, 0xb2, 0x16, 0xf4, 0xb9, 0xfe, 0x21, + 0x4c, 0x23, 0x20, 0xe1, 0x30, 0x1f, 0x6a, 0xfb, 0x62, 0x2c, 0x8e, 0xc4, 0xa4, 0xb3, 0x5d, 0xea, + 0x50, 0x55, 0xab, 0xba, 0x17, 0x63, 0xd5, 0x65, 0x9d, 0xae, 0x5b, 0x0d, 0x6a, 0xa9, 0x7a, 0x3b, + 0xa1, 0x88, 0x07, 0x65, 0x18, 0x59, 0xfe, 0x3f, 0x09, 0x46, 0x85, 0xc9, 0xa7, 0xf0, 0x55, 0xf1, + 0xc7, 0xd1, 0xaf, 0x8a, 0x4b, 0x7d, 0x6e, 0xf0, 0xf4, 0x4f, 0x8a, 0xff, 0x08, 0x4c, 0x67, 0x5b, + 0x9a, 0x45, 0xf5, 0xae, 0x61, 0xd3, 0x78, 0x54, 0xb3, 0xcd, 0x88, 0x39, 0x05, 0x39, 0x30, 0xa9, + 0xc6, 0x72, 0x80, 0x70, 0x6d, 0xad, 0x3f, 0x4b, 0x7c, 0xb1, 0x7a, 0x59, 0xc0, 0x4f, 0xc6, 0x29, + 0x38, 0xa1, 0x42, 0x26, 0x90, 0xe0, 0x42, 0x0f, 0xa1, 0xb0, 0x4b, 0xa9, 0x99, 0x72, 0x5e, 0xdd, + 0x23, 0xf3, 0x04, 0x26, 0x94, 0xf8, 0xec, 0x36, 0x37, 0x37, 0x30, 0x87, 0x92, 0x7f, 0x15, 0xf8, + 0xa3, 0xe1, 0xc6, 0xb8, 0x9f, 0x4f, 0xa5, 0x93, 0xe4, 0xd3, 0xd1, 0xb4, 0x5c, 0x8a, 0x1e, 0x40, + 0x9e, 0x6a, 0xfd, 0x7e, 0x16, 0x0a, 0xc4, 0xcd, 0x95, 0x46, 0x90, 0x90, 0x36, 0x57, 0x1a, 0x98, + 0x41, 0xa0, 0x75, 0x18, 0x62, 0xd5, 0x87, 0x6d, 0xc1, 0x7c, 0xff, 0x5b, 0x9a, 0xcd, 0x3f, 0x08, + 0x08, 0xf6, 0xcb, 0xc6, 0x2e, 0x8e, 0xfc, 0x09, 0x8c, 0x45, 0xf6, 0x29, 0xfa, 0x18, 0xce, 0x6a, + 0x86, 0xd2, 0xaa, 0x2b, 0x9a, 0xa2, 0x37, 0x89, 0x77, 0x39, 0x70, 0x29, 0xed, 0x0b, 0x63, 0x25, + 0xc4, 0x27, 0x76, 0xb9, 0x7f, 0x9d, 0x1a, 0xa6, 0xe1, 0x08, 0xa2, 0xac, 0x00, 0x04, 0x73, 0x44, + 0x15, 0x18, 0x62, 0x71, 0xe6, 0xd6, 0x93, 0x91, 0xfa, 0x08, 0xb3, 0x90, 0x85, 0x9f, 0x8d, 0xdd, + 0x71, 0x74, 0x03, 0xc0, 0x26, 0x4d, 0x8b, 0x50, 0x9e, 0x0c, 0x72, 0xd1, 0x4b, 0xe5, 0x86, 0x4f, + 0xc1, 0x21, 0x2e, 0xf9, 0x47, 0x12, 0x8c, 0xad, 0x11, 0xfa, 0xa9, 0x61, 0xed, 0x6d, 0xf0, 0xc7, + 0x00, 0xa7, 0x90, 0x6c, 0x71, 0x24, 0xd9, 0xbe, 0xd1, 0x63, 0x65, 0x22, 0xd6, 0x65, 0xa5, 0x5c, + 0xf9, 0x4b, 0x09, 0x66, 0x23, 0x9c, 0x77, 0x83, 0xad, 0xbb, 0x05, 0x43, 0xa6, 0x61, 0x51, 0xaf, + 0x10, 0x0f, 0xa4, 0x90, 0xa5, 0xb1, 0x50, 0x29, 0x66, 0x30, 0xd8, 0x45, 0x43, 0x2b, 0x90, 0xa3, + 0x86, 0x08, 0xd5, 0xc1, 0x30, 0x09, 0xb1, 0xea, 0x20, 0x30, 0x73, 0x9b, 0x06, 0xce, 0x51, 0x83, + 0x2d, 0x44, 0x39, 0xc2, 0x15, 0x4e, 0x3e, 0x2f, 0x68, 0x06, 0x18, 0x0a, 0x3b, 0x96, 0xd1, 0x39, + 0xf1, 0x1c, 0xfc, 0x85, 0xb8, 0x67, 0x19, 0x1d, 0xcc, 0xb1, 0xe4, 0xaf, 0x24, 0x98, 0x8a, 0x70, + 0x9e, 0x42, 0xe2, 0x7f, 0x18, 0x4d, 0xfc, 0x57, 0x07, 0x99, 0x48, 0x46, 0xfa, 0xff, 0x2a, 0x17, + 0x9b, 0x06, 0x9b, 0x30, 0xda, 0x81, 0x51, 0xd3, 0x68, 0x35, 0x9e, 0xc3, 0x75, 0xe0, 0x04, 0xab, + 0x9b, 0x1b, 0x01, 0x16, 0x0e, 0x03, 0xa3, 0x03, 0x98, 0xd2, 0x95, 0x0e, 0xb1, 0x4d, 0xa5, 0x49, + 0x1a, 0xcf, 0xe1, 0x80, 0xe4, 0x25, 0x7e, 0xdf, 0x10, 0x47, 0xc4, 0x49, 0x25, 0x68, 0x15, 0x8a, + 0xaa, 0xc9, 0xfb, 0x38, 0xd1, 0xbb, 
0xf4, 0xac, 0xa2, 0x6e, 0xd7, 0xe7, 0xe6, 0x73, 0xf1, 0x03, + 0x7b, 0x18, 0xf2, 0x7f, 0xc6, 0xa3, 0x81, 0xc5, 0x1f, 0xba, 0x0f, 0x25, 0xfe, 0xac, 0xa6, 0x69, + 0x68, 0xde, 0xcd, 0x00, 0x5b, 0xd9, 0x0d, 0x31, 0xf6, 0xec, 0xb0, 0x72, 0x3e, 0xe5, 0xd0, 0xd7, + 0x23, 0x63, 0x5f, 0x18, 0xad, 0x41, 0xc1, 0xfc, 0x21, 0x1d, 0x0c, 0x2f, 0x72, 0xbc, 0x6d, 0xe1, + 0x38, 0xf2, 0x5f, 0xe7, 0x63, 0xe6, 0xf2, 0x52, 0xf7, 0xe4, 0xb9, 0xad, 0xba, 0xdf, 0x31, 0x65, + 0xae, 0xfc, 0x36, 0x14, 0x45, 0x85, 0x17, 0xc1, 0xfc, 0xf6, 0x20, 0xc1, 0x1c, 0xae, 0x62, 0xfe, + 0x07, 0x8b, 0x37, 0xe8, 0x01, 0xa3, 0x8f, 0x60, 0x98, 0xb8, 0x2a, 0xdc, 0xda, 0x78, 0x6b, 0x10, + 0x15, 0x41, 0x5e, 0x0d, 0x1a, 0x55, 0x31, 0x26, 0x50, 0xd1, 0x7b, 0xcc, 0x5f, 0x8c, 0x97, 0x7d, + 0x04, 0xda, 0xe5, 0x02, 0x2f, 0x57, 0x17, 0xdc, 0x69, 0xfb, 0xc3, 0xcf, 0x0e, 0x2b, 0x10, 0xfc, + 0xc4, 0x61, 0x09, 0xf9, 0x27, 0x12, 0x4c, 0x71, 0x0f, 0x35, 0x1d, 0x4b, 0xa5, 0xdd, 0x53, 0x2b, + 0x4c, 0x8f, 0x22, 0x85, 0xe9, 0xad, 0x1e, 0x6e, 0x49, 0x58, 0x98, 0x59, 0x9c, 0xbe, 0x96, 0xe0, + 0xa5, 0x04, 0xf7, 0x29, 0xe4, 0xc5, 0xad, 0x68, 0x5e, 0x7c, 0x63, 0xd0, 0x09, 0x65, 0xbd, 0x91, + 0x18, 0x4b, 0x99, 0x0e, 0xdf, 0x29, 0x37, 0x00, 0x4c, 0x4b, 0xdd, 0x57, 0x35, 0xd2, 0x16, 0x97, + 0xe0, 0xa5, 0xd0, 0xb3, 0x36, 0x9f, 0x82, 0x43, 0x5c, 0xc8, 0x86, 0x99, 0x16, 0xd9, 0x51, 0x1c, + 0x8d, 0x2e, 0xb4, 0x5a, 0x8b, 0x8a, 0xa9, 0x6c, 0xab, 0x9a, 0x4a, 0x55, 0x71, 0x5c, 0x30, 0x52, + 0xbf, 0xe3, 0x5e, 0x4e, 0xa7, 0x71, 0x3c, 0x3b, 0xac, 0x5c, 0x48, 0xbb, 0x1d, 0xf2, 0x58, 0xba, + 0x38, 0x03, 0x1a, 0x75, 0xa1, 0x6c, 0x91, 0x4f, 0x1c, 0xd5, 0x22, 0xad, 0x25, 0xcb, 0x30, 0x23, + 0x6a, 0xf3, 0x5c, 0xed, 0x1f, 0x1e, 0x1d, 0x56, 0xca, 0x38, 0x83, 0xa7, 0xb7, 0xe2, 0x4c, 0x78, + 0xf4, 0x04, 0xa6, 0x15, 0xf7, 0x35, 0x60, 0x44, 0xab, 0xbb, 0x4b, 0x6e, 0x1f, 0x1d, 0x56, 0xa6, + 0x17, 0x92, 0xe4, 0xde, 0x0a, 0xd3, 0x40, 0x51, 0x0d, 0x8a, 0xfb, 0xfc, 0xad, 0xa2, 0x5d, 0x1e, + 0xe2, 0xf8, 0xac, 0x10, 0x14, 0xdd, 0xe7, 0x8b, 0x0c, 0x73, 0xf8, 0x5e, 0x83, 0xef, 0x3e, 0x8f, + 0x8b, 0x7d, 0x50, 0xb2, 0x5e, 0x52, 0xec, 0x78, 0x7e, 0x62, 0x5c, 0x0a, 0xb2, 0xd6, 0x83, 0x80, + 0x84, 0xc3, 0x7c, 0xe8, 0x31, 0x8c, 0xec, 0x8a, 0x53, 0x09, 0xbb, 0x5c, 0xec, 0xab, 0x08, 0x47, + 0x4e, 0x31, 0xea, 0x53, 0x42, 0xc5, 0x88, 0x37, 0x6c, 0xe3, 0x00, 0x11, 0x5d, 0x81, 0x22, 0xff, + 0xb1, 0xbc, 0xc4, 0x8f, 0xe3, 0x4a, 0x41, 0x6e, 0x7b, 0xe0, 0x0e, 0x63, 0x8f, 0xee, 0xb1, 0x2e, + 0x6f, 0x2c, 0xf2, 0x63, 0xe1, 0x18, 0xeb, 0xf2, 0xc6, 0x22, 0xf6, 0xe8, 0xe8, 0x63, 0x28, 0xda, + 0x64, 0x45, 0xd5, 0x9d, 0x83, 0x32, 0xf4, 0x75, 0xa9, 0xdc, 0xb8, 0xcb, 0xb9, 0x63, 0x07, 0x63, + 0x81, 0x06, 0x41, 0xc7, 0x1e, 0x2c, 0xda, 0x85, 0x11, 0xcb, 0xd1, 0x17, 0xec, 0x2d, 0x9b, 0x58, + 0xe5, 0x51, 0xae, 0xa3, 0x57, 0x3a, 0xc7, 0x1e, 0x7f, 0x5c, 0x8b, 0xef, 0x21, 0x9f, 0x03, 0x07, + 0xe0, 0xe8, 0xef, 0x24, 0x40, 0xb6, 0x63, 0x9a, 0x1a, 0xe9, 0x10, 0x9d, 0x2a, 0x1a, 0x3f, 0x8b, + 0xb3, 0xcb, 0x67, 0xb9, 0xce, 0x3f, 0xea, 0x35, 0xaf, 0x84, 0x60, 0x5c, 0xb9, 0x7f, 0xe8, 0x9d, + 0x64, 0xc5, 0x29, 0x7a, 0x99, 0x6b, 0x77, 0x6c, 0xfe, 0x77, 0x79, 0xac, 0x2f, 0xd7, 0xa6, 0x9f, + 0x39, 0x06, 0xae, 0x15, 0x74, 0xec, 0xc1, 0xa2, 0x47, 0x30, 0x63, 0x11, 0xa5, 0xb5, 0xae, 0x6b, + 0x5d, 0x6c, 0x18, 0xf4, 0x9e, 0xaa, 0x11, 0xbb, 0x6b, 0x53, 0xd2, 0x29, 0x8f, 0xf3, 0x65, 0xf7, + 0xdf, 0x7e, 0xe0, 0x54, 0x2e, 0x9c, 0x21, 0x8d, 0x3a, 0x50, 0xf1, 0x52, 0x06, 0xdb, 0x4f, 0x7e, + 0xce, 0xba, 0x6b, 0x37, 0x15, 0xcd, 0xbd, 0x07, 0x98, 0xe0, 0x0a, 0x5e, 0x3b, 0x3a, 0xac, 0x54, + 0x96, 0x8e, 0x67, 0xc5, 0xbd, 0xb0, 0xd0, 0x07, 0x50, 0x56, 
0xb2, 0xf4, 0x4c, 0x72, 0x3d, 0xaf, + 0xb0, 0x3c, 0x94, 0xa9, 0x20, 0x53, 0x1a, 0x51, 0x98, 0x54, 0xa2, 0x8f, 0x8e, 0xed, 0xf2, 0x54, + 0x5f, 0x07, 0x91, 0xb1, 0xb7, 0xca, 0xc1, 0x61, 0x44, 0x8c, 0x60, 0xe3, 0x84, 0x06, 0xf4, 0x17, + 0x80, 0x94, 0xf8, 0x3b, 0x69, 0xbb, 0x8c, 0xfa, 0x2a, 0x3f, 0x89, 0x07, 0xd6, 0x41, 0xd8, 0x25, + 0x48, 0x36, 0x4e, 0xd1, 0xc3, 0x1f, 0x6f, 0x88, 0xa3, 0xfc, 0xd3, 0x79, 0x00, 0x3b, 0xd8, 0xe3, + 0x8d, 0xc0, 0xb4, 0xe7, 0xf6, 0x78, 0x23, 0x04, 0x79, 0xfc, 0xe1, 0xe1, 0x2f, 0x72, 0x30, 0x1d, + 0x30, 0xf7, 0xfd, 0x78, 0x23, 0x45, 0xe4, 0x77, 0x8f, 0x60, 0x7b, 0x3f, 0x82, 0xfd, 0x52, 0x82, + 0xf1, 0xc0, 0x75, 0xbf, 0x79, 0x0f, 0x2a, 0x02, 0xdb, 0x32, 0x5a, 0xbc, 0xff, 0xce, 0x85, 0x27, + 0xf0, 0x5b, 0x7f, 0xab, 0xff, 0xc3, 0x5f, 0xae, 0xca, 0x5f, 0xe7, 0x61, 0x32, 0xbe, 0x1b, 0x23, + 0x97, 0xbf, 0x52, 0xcf, 0xcb, 0xdf, 0x0d, 0x38, 0xb7, 0xe3, 0x68, 0x5a, 0x97, 0xbb, 0x21, 0x74, + 0x03, 0xec, 0x5e, 0xde, 0xbc, 0x22, 0x24, 0xcf, 0xdd, 0x4b, 0xe1, 0xc1, 0xa9, 0x92, 0x19, 0x17, + 0xd9, 0xf9, 0x13, 0x5d, 0x64, 0x27, 0xee, 0x55, 0x0b, 0x03, 0xdc, 0xab, 0xa6, 0x5e, 0x4a, 0x0f, + 0x9d, 0xe0, 0x52, 0xfa, 0x24, 0xb7, 0xc8, 0x29, 0x49, 0xac, 0xe7, 0xa3, 0xc6, 0x57, 0x60, 0x4e, + 0x88, 0x51, 0x7e, 0xc1, 0xab, 0x53, 0xcb, 0xd0, 0x34, 0x62, 0x2d, 0x39, 0x9d, 0x4e, 0x57, 0x7e, + 0x17, 0xc6, 0xa3, 0x4f, 0x17, 0xdc, 0x95, 0x76, 0x5f, 0x4f, 0x88, 0x2b, 0xb4, 0xd0, 0x4a, 0xbb, + 0xe3, 0xd8, 0xe7, 0x90, 0xff, 0x46, 0x82, 0x99, 0xf4, 0x27, 0x8a, 0x48, 0x83, 0xf1, 0x8e, 0x72, + 0x10, 0x7e, 0x36, 0x2a, 0x9d, 0xf0, 0x70, 0x83, 0xdf, 0x59, 0xaf, 0x46, 0xb0, 0x70, 0x0c, 0x5b, + 0xfe, 0x5e, 0x82, 0xd9, 0x8c, 0xdb, 0xe2, 0xd3, 0xb5, 0x04, 0x7d, 0x08, 0xa5, 0x8e, 0x72, 0xd0, + 0x70, 0xac, 0x36, 0x39, 0xf1, 0x71, 0x0e, 0xcf, 0x18, 0xab, 0x02, 0x05, 0xfb, 0x78, 0xf2, 0xe7, + 0x12, 0x94, 0xb3, 0x1a, 0x6b, 0x74, 0x33, 0x72, 0xaf, 0xfd, 0x6a, 0xec, 0x5e, 0x7b, 0x2a, 0x21, + 0xf7, 0x82, 0x6e, 0xb5, 0xff, 0x4b, 0x82, 0x99, 0xf4, 0x0f, 0x0c, 0xf4, 0x66, 0xc4, 0xc2, 0x4a, + 0xcc, 0xc2, 0x89, 0x98, 0x94, 0xb0, 0xef, 0x23, 0x18, 0x17, 0x9f, 0x21, 0x02, 0x46, 0x78, 0x55, + 0x4e, 0xcb, 0x95, 0x02, 0xc2, 0x6b, 0xbb, 0xf9, 0x7a, 0x45, 0xc7, 0x70, 0x0c, 0x4d, 0xfe, 0xdb, + 0x1c, 0x0c, 0x35, 0x9a, 0x8a, 0x46, 0x4e, 0xa1, 0xcd, 0x7a, 0x3f, 0xd2, 0x66, 0xf5, 0xfa, 0x17, + 0x0f, 0x6e, 0x55, 0x66, 0x87, 0x85, 0x63, 0x1d, 0xd6, 0xeb, 0x7d, 0xa1, 0x1d, 0xdf, 0x5c, 0xfd, + 0x01, 0x8c, 0xf8, 0x4a, 0x07, 0xcb, 0xf9, 0xf2, 0xbf, 0xe7, 0x60, 0x34, 0xa4, 0x62, 0xc0, 0x8a, + 0xb1, 0x13, 0xa9, 0xb4, 0xfd, 0xfc, 0x63, 0x5d, 0x48, 0x57, 0xd5, 0xab, 0xad, 0xee, 0x13, 0xc5, + 0xe0, 0x51, 0x5a, 0xb2, 0xe4, 0xbe, 0x0b, 0xe3, 0x94, 0xff, 0xe3, 0x99, 0x7f, 0x08, 0x9a, 0xe7, + 0xb1, 0xe8, 0x3f, 0x6c, 0xdd, 0x8c, 0x50, 0x71, 0x8c, 0x7b, 0xee, 0x0e, 0x8c, 0x45, 0x94, 0x0d, + 0xf4, 0xc2, 0xf0, 0x7f, 0x25, 0x78, 0xb5, 0xe7, 0x27, 0x2a, 0xaa, 0x47, 0x36, 0x49, 0x35, 0xb6, + 0x49, 0xe6, 0xb3, 0x01, 0x5e, 0xdc, 0x4b, 0x95, 0xfa, 0xb5, 0xa7, 0xdf, 0xcd, 0x9f, 0xf9, 0xe6, + 0xbb, 0xf9, 0x33, 0xdf, 0x7e, 0x37, 0x7f, 0xe6, 0xaf, 0x8e, 0xe6, 0xa5, 0xa7, 0x47, 0xf3, 0xd2, + 0x37, 0x47, 0xf3, 0xd2, 0xb7, 0x47, 0xf3, 0xd2, 0xcf, 0x8e, 0xe6, 0xa5, 0x7f, 0xfc, 0x7e, 0xfe, + 0xcc, 0x87, 0x45, 0x01, 0xf7, 0xeb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xb3, 0xc8, 0xe2, 0x54, + 0x3c, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index cd4f2a01..7a3e7029 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ 
b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -32,11 +32,10 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// An APIVersion represents a single concrete version of an object model. -message APIVersion { - // Name of this version (e.g. 'v1'). - // +optional - optional string name = 1; +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // Driver is the name of the Flexvolume driver. + optional string driver = 1; } // defines the host volume conditions that will be enabled by a policy @@ -45,7 +44,7 @@ message AllowedHostPath { // is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -100,6 +99,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -197,6 +217,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } message DaemonSetUpdateStrategy { @@ -441,6 +467,7 @@ message IDRange { optional int64 max = 2; } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should // not be included within this rule. @@ -582,6 +609,7 @@ message IngressTLS { optional string secretName = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. @@ -594,6 +622,7 @@ message NetworkPolicy { optional NetworkPolicySpec spec = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 @@ -615,6 +644,7 @@ message NetworkPolicyEgressRule { repeated NetworkPolicyPeer to = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. 
// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. message NetworkPolicyIngressRule { // List of ports which should be made accessible on the pods selected for this rule. @@ -634,6 +664,7 @@ message NetworkPolicyIngressRule { repeated NetworkPolicyPeer from = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. @@ -645,6 +676,7 @@ message NetworkPolicyList { repeated NetworkPolicy items = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. message NetworkPolicyPeer { // This is a label selector which selects Pods in this namespace. // This field follows standard label selector semantics. @@ -664,6 +696,7 @@ message NetworkPolicyPeer { optional IPBlock ipBlock = 3; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. message NetworkPolicyPort { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -679,6 +712,7 @@ message NetworkPolicyPort { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. message NetworkPolicySpec { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. Multiple network policies can select the @@ -752,8 +786,9 @@ message PodSecurityPolicySpec { optional bool privileged = 1; // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional repeated string defaultAddCapabilities = 2; @@ -822,6 +857,12 @@ message PodSecurityPolicySpec { // is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional repeated AllowedHostPath allowedHostPaths = 17; + + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -1074,50 +1115,3 @@ message SupplementalGroupsStrategyOptions { repeated IDRange ranges = 2; } -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -message ThirdPartyResource { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Description is the description of this object. 
- // +optional - optional string description = 2; - - // Versions are versions for this third party object - // +optional - repeated APIVersion versions = 3; -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -message ThirdPartyResourceData { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the raw JSON data for this data. - // +optional - optional bytes data = 2; -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -message ThirdPartyResourceDataList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdpartyResourceData. - repeated ThirdPartyResourceData items = 2; -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -message ThirdPartyResourceList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdPartyResources. - repeated ThirdPartyResource items = 2; -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index 626701cf..7625f678 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, @@ -49,12 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentRollback{}, &ReplicationControllerDummy{}, &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index d3cff2ab..c3d9f72d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -100,63 +100,6 @@ type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Description is the description of this object. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` - - // Versions are versions for this third party object - // +optional - Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ThirdPartyResourceList is a list of ThirdPartyResources. -type ThirdPartyResourceList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. 
- // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdPartyResources. - Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the raw JSON data for this data. - // +optional - Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` -} - // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -532,6 +475,33 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient @@ -588,20 +558,6 @@ type DaemonSetList struct { Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -type ThirdPartyResourceDataList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdpartyResourceData. 
- Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -926,8 +882,9 @@ type PodSecurityPolicySpec struct { // +optional Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` // RequiredDropCapabilities are the capabilities that will be dropped from the container. These @@ -981,6 +938,11 @@ type PodSecurityPolicySpec struct { // is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` } // defines the host volume conditions that will be enabled by a policy @@ -1024,6 +986,12 @@ var ( All FSType = "*" ) +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + // Host Port Range defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { @@ -1144,6 +1112,7 @@ type PodSecurityPolicyList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` @@ -1157,6 +1126,7 @@ type NetworkPolicy struct { Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } +// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. // Policy Type string describes the NetworkPolicy type // This type is beta-level in 1.8 type PolicyType string @@ -1168,6 +1138,7 @@ const ( PolicyTypeEgress PolicyType = "Egress" ) +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. type NetworkPolicySpec struct { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. 
Multiple network policies can select the @@ -1210,6 +1181,7 @@ type NetworkPolicySpec struct { PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. type NetworkPolicyIngressRule struct { // List of ports which should be made accessible on the pods selected for this rule. @@ -1229,6 +1201,7 @@ type NetworkPolicyIngressRule struct { From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 @@ -1250,6 +1223,7 @@ type NetworkPolicyEgressRule struct { To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. type NetworkPolicyPort struct { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -1265,6 +1239,7 @@ type NetworkPolicyPort struct { Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should // not be included within this rule. @@ -1279,6 +1254,7 @@ type IPBlock struct { Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. type NetworkPolicyPeer struct { // Exactly one of the following must be specified. @@ -1302,6 +1278,7 @@ type NetworkPolicyPeer struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index f65f895e..236d934f 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,13 +27,13 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_APIVersion = map[string]string{ - "": "An APIVersion represents a single concrete version of an object model.", - "name": "Name of this version (e.g. 
'v1').", +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "Driver is the name of the Flexvolume driver.", } -func (APIVersion) SwaggerDoc() map[string]string { - return map_APIVersion +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume } var map_AllowedHostPath = map[string]string{ @@ -75,6 +75,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -110,6 +123,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -264,7 +278,7 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } @@ -352,7 +366,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { } var map_NetworkPolicy = map[string]string{ - "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -362,7 +376,7 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { } var map_NetworkPolicyEgressRule = map[string]string{ - "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } @@ -372,7 +386,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyIngressRule = map[string]string{ - "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", } @@ -382,7 +396,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyList = map[string]string{ - "": "Network Policy List is a list of NetworkPolicy objects.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -392,6 +406,7 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { } var map_NetworkPolicyPeer = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If present but empty, this selector selects all pods in this namespace.", "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If present but empty, this selector selects all namespaces.", "ipBlock": "IPBlock defines policy on a particular IPBlock", @@ -402,6 +417,7 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { } var map_NetworkPolicyPort = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", } @@ -411,6 +427,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { } var map_NetworkPolicySpec = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). 
This field is beta-level in 1.8", @@ -444,7 +461,7 @@ func (PodSecurityPolicyList) SwaggerDoc() map[string]string { var map_PodSecurityPolicySpec = map[string]string{ "": "Pod Security Policy Spec defines the policy enforced.", "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the AllowedCapabilities list.", "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", @@ -460,6 +477,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -623,45 +641,4 @@ func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { return map_SupplementalGroupsStrategyOptions } -var map_ThirdPartyResource = map[string]string{ - "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "metadata": "Standard object metadata", - "description": "Description is the description of this object.", - "versions": "Versions are versions for this third party object", -} - -func (ThirdPartyResource) SwaggerDoc() map[string]string { - return map_ThirdPartyResource -} - -var map_ThirdPartyResourceData = map[string]string{ - "": "An internal object, used for versioned storage in etcd. 
Not exposed to the end user.", - "metadata": "Standard object metadata.", - "data": "Data is the raw JSON data for this data.", -} - -func (ThirdPartyResourceData) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceData -} - -var map_ThirdPartyResourceDataList = map[string]string{ - "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ThirdpartyResourceData.", -} - -func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceDataList -} - -var map_ThirdPartyResourceList = map[string]string{ - "": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "metadata": "Standard list metadata.", - "items": "Items is the list of ThirdPartyResources.", -} - -func (ThirdPartyResourceList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceList -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index c173b3f4..02a84ccc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -23,281 +23,22 @@ package v1beta1 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersion).DeepCopyInto(out.(*APIVersion)) - return nil - }, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath)) - return nil - }, InType: reflect.TypeOf(&AllowedHostPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange)) - return nil - }, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IDRange).DeepCopyInto(out.(*IDRange)) - return nil - }, InType: reflect.TypeOf(&IDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Ingress).DeepCopyInto(out.(*Ingress)) - return nil - }, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend)) - return nil - }, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressList).DeepCopyInto(out.(*IngressList)) - return nil - }, InType: reflect.TypeOf(&IngressList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRule).DeepCopyInto(out.(*IngressRule)) - return nil - }, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec)) - return nil - }, InType: reflect.TypeOf(&IngressSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus)) - return nil - }, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS)) - return nil - }, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIVersion) DeepCopyInto(out *APIVersion) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion. -func (in *APIVersion) DeepCopy() *APIVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(APIVersion) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } @@ -427,6 +168,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -509,6 +267,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1445,6 +1210,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedHostPath, len(*in)) copy(*out, *in) } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } return } @@ -1831,135 +1601,3 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource. -func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource { - if in == nil { - return nil - } - out := new(ThirdPartyResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ThirdPartyResource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData. -func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData { - if in == nil { - return nil - } - out := new(ThirdPartyResourceData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList. -func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList. -func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index 8bcc30b0..ef9ae2ae 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 62402b87..ae28d2f2 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -187,3 +187,4 @@ message NetworkPolicySpec { // +optional repeated string policyTypes = 4; } + diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go index 2ba9951d..f47f22e9 100644 --- a/vendor/k8s.io/api/networking/v1/register.go +++ b/vendor/k8s.io/api/networking/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &NetworkPolicy{}, diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 0e670966..955e7434 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -23,57 +23,10 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 1e9f4974..9c456f92 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index af74c52c..a276be1c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -111,3 +111,4 @@ message PodDisruptionBudgetStatus { // total number of pods counted by this disruption budget optional int32 expectedPods = 6; } + diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index e0c6247a..d77f1304 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 93c201e2..70872f09 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,45 +22,10 @@ package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Eviction).DeepCopyInto(out.(*Eviction)) - return nil - }, InType: reflect.TypeOf(&Eviction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudget).DeepCopyInto(out.(*PodDisruptionBudget)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetList).DeepCopyInto(out.(*PodDisruptionBudgetList)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetSpec).DeepCopyInto(out.(*PodDisruptionBudgetSpec)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetStatus).DeepCopyInto(out.(*PodDisruptionBudgetStatus)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 737261a0..28ceb269 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 1285ac19..1530d379 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. 
k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return 
fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,52 +2693,57 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 743 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x94, 0x4f, 0x6b, 0x13, 0x4f, - 0x18, 0xc7, 0x33, 0xf9, 0x43, 0xb3, 0x93, 0x5f, 0xc8, 0xaf, 0x2b, 0xc8, 0x52, 0x61, 0x13, 0x72, - 0x90, 0x80, 0xba, 0x6b, 0xaa, 0xa8, 0x20, 0x3d, 0xb8, 0x15, 0xa5, 0xb4, 0xd6, 0x32, 0xa2, 0x07, - 0xf1, 0xe0, 0x66, 0x33, 0x4d, 0xc7, 0x64, 0xff, 0x30, 0x33, 0x1b, 0x28, 0x5e, 0xc4, 0x9b, 0x37, - 0xdf, 0x85, 0x17, 0xbd, 0xe9, 0x2b, 0xf0, 0xd2, 0x63, 0x8f, 0x3d, 0x05, 0xbb, 0xbe, 0x10, 0x65, - 0x66, 0x77, 0xb3, 0x49, 0x93, 0xd8, 0x9e, 0x02, 0xe2, 0x29, 0x99, 0xe7, 0xf9, 0x7c, 0x9f, 0xf9, - 0xce, 0xb3, 0x33, 0x0f, 0xbc, 0xdf, 
0xbf, 0xc7, 0x0c, 0xe2, 0x9b, 0xfd, 0xb0, 0x83, 0xa9, 0x87, - 0x39, 0x66, 0xe6, 0x10, 0x7b, 0x5d, 0x9f, 0x9a, 0x49, 0xc2, 0x0e, 0x88, 0x49, 0x3b, 0xb6, 0x63, - 0x0e, 0xdb, 0x66, 0x0f, 0x7b, 0x98, 0xda, 0x1c, 0x77, 0x8d, 0x80, 0xfa, 0xdc, 0x57, 0xd5, 0x98, - 0x31, 0xec, 0x80, 0x18, 0x82, 0x31, 0x86, 0xed, 0xb5, 0x1b, 0x3d, 0xc2, 0x0f, 0xc2, 0x8e, 0xe1, - 0xf8, 0xae, 0xd9, 0xf3, 0x7b, 0xbe, 0x29, 0xd1, 0x4e, 0xb8, 0x2f, 0x57, 0x72, 0x21, 0xff, 0xc5, - 0x25, 0xd6, 0x6e, 0x67, 0xdb, 0xb8, 0xb6, 0x73, 0x40, 0x3c, 0x4c, 0x0f, 0xcd, 0xa0, 0xdf, 0x13, - 0x01, 0x66, 0xba, 0x98, 0xdb, 0x73, 0x36, 0x5e, 0x33, 0x17, 0xa9, 0x68, 0xe8, 0x71, 0xe2, 0xe2, - 0x19, 0xc1, 0x9d, 0xf3, 0x04, 0xcc, 0x39, 0xc0, 0xae, 0x3d, 0xa3, 0xbb, 0xb5, 0x48, 0x17, 0x72, - 0x32, 0x30, 0x89, 0xc7, 0x19, 0xa7, 0x67, 0x45, 0xcd, 0xaf, 0x00, 0x56, 0x36, 0x07, 0x21, 0xe3, - 0x98, 0x22, 0x7f, 0x80, 0xd5, 0xd7, 0xb0, 0x2c, 0x0e, 0xd2, 0xb5, 0xb9, 0xad, 0x81, 0x06, 0x68, - 0x55, 0xd6, 0x6f, 0x1a, 0x59, 0xe7, 0xc6, 0x75, 0x8d, 0xa0, 0xdf, 0x13, 0x01, 0x66, 0x08, 0xda, - 0x18, 0xb6, 0x8d, 0xa7, 0x9d, 0x37, 0xd8, 0xe1, 0x4f, 0x30, 0xb7, 0x2d, 0xf5, 0x68, 0x54, 0xcf, - 0x45, 0xa3, 0x3a, 0xcc, 0x62, 0x68, 0x5c, 0x55, 0xdd, 0x84, 0x25, 0x1a, 0x0e, 0x30, 0xd3, 0xf2, - 0x8d, 0x42, 0xab, 0xb2, 0xae, 0x1b, 0xb3, 0x1f, 0xc6, 0xd8, 0xf3, 0x07, 0xc4, 0x39, 0x44, 0xe1, - 0x00, 0x5b, 0xd5, 0xa4, 0x58, 0x49, 0xac, 0x18, 0x8a, 0xb5, 0xcd, 0x0f, 0x79, 0xa8, 0x4e, 0xd8, - 0xb6, 0x88, 0xd7, 0x25, 0x5e, 0x6f, 0x09, 0xee, 0xb7, 0x60, 0x99, 0x85, 0x32, 0x91, 0x1e, 0xe0, - 0xca, 0xbc, 0x03, 0x3c, 0x8b, 0x19, 0xeb, 0xff, 0xa4, 0x58, 0x39, 0x09, 0x30, 0x34, 0x96, 0xab, - 0x8f, 0xe0, 0x0a, 0xf5, 0x07, 0x18, 0xe1, 0x7d, 0xad, 0x20, 0xbd, 0xce, 0xad, 0x84, 0x62, 0xc4, - 0xaa, 0x25, 0x95, 0x56, 0x92, 0x00, 0x4a, 0xc5, 0xcd, 0xef, 0x00, 0x5e, 0x9e, 0xed, 0xc5, 0x0e, - 0x61, 0x5c, 0x7d, 0x35, 0xd3, 0x0f, 0xe3, 0x62, 0xfd, 0x10, 0x6a, 0xd9, 0x8d, 0xf1, 0x01, 0xd2, - 0xc8, 0x44, 0x2f, 0xb6, 0x61, 0x89, 0x70, 0xec, 0xa6, 0x8d, 0xb8, 0x3a, 0xcf, 0xfe, 0xac, 0xb1, - 0xec, 0x8b, 0x6e, 0x09, 0x31, 0x8a, 0x6b, 0x34, 0xbf, 0x01, 0x58, 0x9b, 0x80, 0x97, 0x60, 0xff, - 0xe1, 0xb4, 0xfd, 0xfa, 0x79, 0xf6, 0xe7, 0xfb, 0xfe, 0x05, 0x20, 0xcc, 0xae, 0xab, 0x5a, 0x87, - 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x81, 0x46, 0xa1, 0xa5, 0x58, 0x8a, 0xe0, 0x5f, 0x88, 0x00, 0x8a, - 0xe3, 0xea, 0x35, 0xa8, 0xd8, 0x01, 0x79, 0x4c, 0xfd, 0x30, 0x88, 0x77, 0x56, 0xac, 0x6a, 0x34, - 0xaa, 0x2b, 0x0f, 0xf6, 0xb6, 0xe2, 0x20, 0xca, 0xf2, 0x02, 0xa6, 0x98, 0xf9, 0x21, 0x75, 0x30, - 0xd3, 0x0a, 0x19, 0x8c, 0xd2, 0x20, 0xca, 0xf2, 0xea, 0x5d, 0x58, 0x4d, 0x17, 0xbb, 0xb6, 0x8b, - 0x99, 0x56, 0x94, 0x82, 0xd5, 0x68, 0x54, 0xaf, 0xa2, 0xc9, 0x04, 0x9a, 0xe6, 0xd4, 0x0d, 0x58, - 0xf3, 0x7c, 0x2f, 0x45, 0x9e, 0xa3, 0x1d, 0xa6, 0x95, 0xa4, 0xf4, 0x52, 0x34, 0xaa, 0xd7, 0x76, - 0xa7, 0x53, 0xe8, 0x2c, 0xdb, 0xfc, 0x02, 0x60, 0xf1, 0x6f, 0x9a, 0x1d, 0xef, 0xf3, 0xb0, 0xf2, - 0xcf, 0x0f, 0x0d, 0xf1, 0xdc, 0x96, 0x3b, 0x2d, 0x2e, 0xf2, 0xdc, 0xce, 0x1f, 0x13, 0x9f, 0x00, - 0x2c, 0x2f, 0x69, 0x3e, 0x6c, 0x4c, 0x1b, 0xd6, 0x16, 0x1a, 0x9e, 0xef, 0xf4, 0x2d, 0x4c, 0xbb, - 0xae, 0x5e, 0x87, 0xe5, 0xf4, 0x4d, 0x4b, 0x9f, 0x4a, 0xb6, 0x6f, 0xfa, 0xec, 0xd1, 0x98, 0x50, - 0x1b, 0xb0, 0xd8, 0x27, 0x5e, 0x57, 0xcb, 0x4b, 0xf2, 0xbf, 0x84, 0x2c, 0x6e, 0x13, 0xaf, 0x8b, - 0x64, 0x46, 0x10, 0x9e, 0xed, 0x62, 0x79, 0x03, 0x26, 0x08, 0xf1, 0x9a, 0x91, 0xcc, 0x34, 0x3f, - 0x03, 0xb8, 0x92, 0xdc, 0x9e, 0x71, 0x3d, 0xb0, 0xb0, 0xde, 0xa4, 0xbf, 0xfc, 0x45, 0xfc, 0xfd, - 0x79, 0x77, 0xd5, 0x84, 0x8a, 0xf8, 0x65, 0x81, 0xed, 0x60, 
0xad, 0x28, 0xb1, 0xd5, 0x04, 0x53, - 0x76, 0xd3, 0x04, 0xca, 0x18, 0xab, 0x75, 0x74, 0xaa, 0xe7, 0x8e, 0x4f, 0xf5, 0xdc, 0xc9, 0xa9, - 0x9e, 0x7b, 0x17, 0xe9, 0xe0, 0x28, 0xd2, 0xc1, 0x71, 0xa4, 0x83, 0x93, 0x48, 0x07, 0x3f, 0x22, - 0x1d, 0x7c, 0xfc, 0xa9, 0xe7, 0x5e, 0xe6, 0x87, 0xed, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x66, - 0x92, 0x08, 0x1d, 0x04, 0x0a, 0x00, 0x00, + // 827 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x8b, 0x23, 0x45, + 0x18, 0x4d, 0x65, 0x12, 0x26, 0x5d, 0x31, 0xc4, 0x2d, 0x17, 0x69, 0xa2, 0x74, 0x86, 0x16, 0x24, + 0xa0, 0x76, 0x9b, 0x5d, 0x51, 0x41, 0xf6, 0xb0, 0xbd, 0xa2, 0x0c, 0x3b, 0x8e, 0x4b, 0x2d, 0x7a, + 0x10, 0x0f, 0x56, 0x77, 0x6a, 0x3b, 0x65, 0xfa, 0x17, 0x55, 0xd5, 0x81, 0xc5, 0x8b, 0x08, 0x1e, + 0xbc, 0x79, 0xd4, 0xbf, 0xc0, 0x8b, 0x1e, 0xfd, 0x0b, 0xbc, 0xcc, 0x71, 0x8f, 0x7b, 0x0a, 0x4e, + 0xfb, 0x87, 0x28, 0xfd, 0x2b, 0x9d, 0xa4, 0x3b, 0x4e, 0x4e, 0x01, 0xf1, 0x34, 0x53, 0xdf, 0xf7, + 0xde, 0xfb, 0x5e, 0xbf, 0xa9, 0xaf, 0x06, 0x7e, 0xb0, 0x78, 0x5f, 0x18, 0x2c, 0x34, 0x17, 0xb1, + 0x4d, 0x79, 0x40, 0x25, 0x15, 0xe6, 0x92, 0x06, 0xb3, 0x90, 0x9b, 0x45, 0x83, 0x44, 0xcc, 0xe4, + 0x36, 0x71, 0xcc, 0xe5, 0xd4, 0x74, 0x69, 0x40, 0x39, 0x91, 0x74, 0x66, 0x44, 0x3c, 0x94, 0x21, + 0x42, 0x39, 0xc6, 0x20, 0x11, 0x33, 0x52, 0x8c, 0xb1, 0x9c, 0x8e, 0xde, 0x72, 0x99, 0x9c, 0xc7, + 0xb6, 0xe1, 0x84, 0xbe, 0xe9, 0x86, 0x6e, 0x68, 0x66, 0x50, 0x3b, 0x7e, 0x92, 0x9d, 0xb2, 0x43, + 0xf6, 0x5b, 0x2e, 0x31, 0x9a, 0xd4, 0xc7, 0x10, 0x2f, 0x9a, 0x93, 0xda, 0xb0, 0xd1, 0x3b, 0x15, + 0xd2, 0x27, 0xce, 0x9c, 0x05, 0x94, 0x3f, 0x35, 0xa3, 0x85, 0x9b, 0x16, 0x84, 0xe9, 0x53, 0x49, + 0x1a, 0x2c, 0x8e, 0xcc, 0x7d, 0x2c, 0x1e, 0x07, 0x92, 0xf9, 0xb4, 0x46, 0x78, 0xf7, 0x26, 0x82, + 0x70, 0xe6, 0xd4, 0x27, 0x35, 0xde, 0xdd, 0x7d, 0xbc, 0x58, 0x32, 0xcf, 0x64, 0x81, 0x14, 0x92, + 0xef, 0x92, 0xf4, 0x9f, 0x01, 0x1c, 0xde, 0x77, 0x5d, 0x4e, 0x5d, 0x22, 0x59, 0x18, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0x6f, 0x3b, 0x5e, 0x2c, 0x24, 0xe5, 0x38, 0xf4, 0xe8, 0x63, 0xea, 0x51, + 0x47, 0x86, 0x5c, 0xa8, 0xe0, 0xec, 0x64, 0xd2, 0xbf, 0x73, 0xd7, 0xa8, 0x42, 0x5f, 0x0f, 0x32, + 0xa2, 0x85, 0x9b, 0x16, 0x84, 0x91, 0xe6, 0x60, 0x2c, 0xa7, 0xc6, 0x05, 0xb1, 0xa9, 0x57, 0x72, + 0xad, 0x57, 0xaf, 0x56, 0xe3, 0x56, 0xb2, 0x1a, 0xdf, 0x7e, 0xd0, 0x20, 0x8c, 0x1b, 0xc7, 0xe9, + 0x3f, 0xb5, 0x61, 0x7f, 0x03, 0x8e, 0xbe, 0x82, 0xbd, 0x54, 0x7c, 0x46, 0x24, 0x51, 0xc1, 0x19, + 0x98, 0xf4, 0xef, 0xbc, 0x7d, 0x98, 0x95, 0x4f, 0xed, 0xaf, 0xa9, 0x23, 0x3f, 0xa1, 0x92, 0x58, + 0xa8, 0xf0, 0x01, 0xab, 0x1a, 0x5e, 0xab, 0xa2, 0x07, 0xb0, 0xcb, 0x63, 0x8f, 0x0a, 0xb5, 0x9d, + 0x7d, 0xa9, 0x66, 0xd4, 0xaf, 0x97, 0xf1, 0x28, 0xf4, 0x98, 0xf3, 0x34, 0x0d, 0xca, 0x1a, 0x14, + 0x62, 0xdd, 0xf4, 0x24, 0x70, 0xce, 0x45, 0x36, 0x1c, 0x92, 0xed, 0x44, 0xd5, 0x93, 0xcc, 0xed, + 0x6b, 0x4d, 0x72, 0x3b, 0xe1, 0x5b, 0x2f, 0x25, 0xab, 0xf1, 0xee, 0x5f, 0x04, 0xef, 0x0a, 0xea, + 0x3f, 0xb4, 0x21, 0xda, 0x88, 0xc6, 0x62, 0xc1, 0x8c, 0x05, 0xee, 0x11, 0x12, 0x3a, 0x87, 0x3d, + 0x11, 0x67, 0x8d, 0x32, 0xa4, 0x57, 0x9a, 0xbe, 0xea, 0x71, 0x8e, 0xb1, 0x5e, 0x2c, 0xc4, 0x7a, + 0x45, 0x41, 0xe0, 0x35, 0x1d, 0x7d, 0x04, 0x4f, 0x79, 0xe8, 0x51, 0x4c, 0x9f, 0x14, 0xf9, 0x34, + 0x2a, 0xe1, 0x1c, 0x62, 0x0d, 0x0b, 0xa5, 0xd3, 0xa2, 0x80, 0x4b, 0xb2, 0xfe, 0x07, 0x80, 0x2f, + 0xd7, 0xb3, 0xb8, 0x60, 0x42, 0xa2, 0x2f, 0x6b, 0x79, 0x18, 0x07, 0x5e, 0x5e, 0x26, 0xf2, 0x34, + 0xd6, 0x1f, 0x50, 0x56, 0x36, 0xb2, 0x78, 0x08, 0xbb, 0x4c, 0x52, 0xbf, 0x0c, 0xe2, 0xf5, 
0x26, + 0xfb, 0x75, 0x63, 0xd5, 0xad, 0x39, 0x4f, 0xc9, 0x38, 0xd7, 0xd0, 0x7f, 0x07, 0x70, 0xb8, 0x01, + 0x3e, 0x82, 0xfd, 0x0f, 0xb7, 0xed, 0x8f, 0x6f, 0xb2, 0xdf, 0xec, 0xfb, 0x6f, 0x00, 0x61, 0xb5, + 0x12, 0x68, 0x0c, 0xbb, 0x4b, 0xca, 0xed, 0xfc, 0xad, 0x50, 0x2c, 0x25, 0xc5, 0x7f, 0x9e, 0x16, + 0x70, 0x5e, 0x47, 0x6f, 0x40, 0x85, 0x44, 0xec, 0x63, 0x1e, 0xc6, 0x51, 0x3e, 0x59, 0xb1, 0x06, + 0xc9, 0x6a, 0xac, 0xdc, 0x7f, 0x74, 0x9e, 0x17, 0x71, 0xd5, 0x4f, 0xc1, 0x9c, 0x8a, 0x30, 0xe6, + 0x0e, 0x15, 0xea, 0x49, 0x05, 0xc6, 0x65, 0x11, 0x57, 0x7d, 0xf4, 0x1e, 0x1c, 0x94, 0x87, 0x4b, + 0xe2, 0x53, 0xa1, 0x76, 0x32, 0xc2, 0xad, 0x64, 0x35, 0x1e, 0xe0, 0xcd, 0x06, 0xde, 0xc6, 0xa1, + 0x7b, 0x70, 0x18, 0x84, 0x41, 0x09, 0xf9, 0x0c, 0x5f, 0x08, 0xb5, 0x9b, 0x51, 0xb3, 0x5d, 0xbc, + 0xdc, 0x6e, 0xe1, 0x5d, 0xac, 0xfe, 0x1b, 0x80, 0x9d, 0xff, 0xd0, 0xfb, 0xa4, 0x7f, 0xd7, 0x86, + 0xfd, 0xff, 0xfd, 0xa3, 0x91, 0xae, 0xdb, 0x71, 0x5f, 0x8b, 0x43, 0xd6, 0xed, 0xe6, 0x67, 0xe2, + 0x17, 0x00, 0x7b, 0x47, 0x7a, 0x1f, 0xee, 0x6d, 0x1b, 0x56, 0xf7, 0x1a, 0x6e, 0x76, 0xfa, 0x0d, + 0x2c, 0x53, 0x47, 0x6f, 0xc2, 0x5e, 0xb9, 0xd3, 0x99, 0x4f, 0xa5, 0x9a, 0x5b, 0xae, 0x3d, 0x5e, + 0x23, 0xd0, 0x19, 0xec, 0x2c, 0x58, 0x30, 0x53, 0xdb, 0x19, 0xf2, 0x85, 0x02, 0xd9, 0x79, 0xc8, + 0x82, 0x19, 0xce, 0x3a, 0x29, 0x22, 0x20, 0x7e, 0xfe, 0x6f, 0x75, 0x03, 0x91, 0x6e, 0x33, 0xce, + 0x3a, 0xfa, 0xaf, 0x00, 0x9e, 0x16, 0xb7, 0x67, 0xad, 0x07, 0xf6, 0xea, 0x6d, 0xfa, 0x6b, 0x1f, + 0xe2, 0xef, 0xdf, 0xa7, 0x23, 0x13, 0x2a, 0xe9, 0x4f, 0x11, 0x11, 0x87, 0xaa, 0x9d, 0x0c, 0x76, + 0xab, 0x80, 0x29, 0x97, 0x65, 0x03, 0x57, 0x18, 0x6b, 0x72, 0x75, 0xad, 0xb5, 0x9e, 0x5d, 0x6b, + 0xad, 0xe7, 0xd7, 0x5a, 0xeb, 0xdb, 0x44, 0x03, 0x57, 0x89, 0x06, 0x9e, 0x25, 0x1a, 0x78, 0x9e, + 0x68, 0xe0, 0xcf, 0x44, 0x03, 0x3f, 0xfe, 0xa5, 0xb5, 0xbe, 0x68, 0x2f, 0xa7, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x32, 0xe3, 0x23, 0xf8, 0x2e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 796a28f4..6edb2779 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.rbac.v1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -179,3 +194,4 @@ message Subject { // +optional optional string namespace = 4; } + diff --git a/vendor/k8s.io/api/rbac/v1/register.go b/vendor/k8s.io/api/rbac/v1/register.go index 7336b545..8f1fd460 100644 --- a/vendor/k8s.io/api/rbac/v1/register.go +++ b/vendor/k8s.io/api/rbac/v1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 8dbd1a8b..91990548 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -170,6 +170,20 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 7770d408..280ae5a8 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 7ffc8186..e1aab581 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]meta_v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 619b47bf..5236a477 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 31e68aee..c66cadd9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, 
i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this 
*AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,53 +2693,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 766 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0xf9, 0xa0, 0xc9, 0x86, 0x28, 0xd4, 0x48, 0xc8, 0xea, 0xc1, 0x09, 0x11, 0x48, - 0x95, 0x28, 0x36, 0x2d, 0x08, 0xb8, 0x70, 0x68, 0x7a, 0x40, 0x81, 0xd2, 0x96, 0x45, 0xf4, 0x80, - 0x38, 0xb0, 0x71, 0xb6, 0xc9, 0x12, 0x7f, 0x69, 0xd7, 0x8e, 0x54, 0x71, 0xe1, 0x09, 0x10, 0x17, - 0x1e, 0x83, 0x0b, 0xdc, 0xe0, 0x05, 0xca, 0xad, 0xc7, 0x9e, 0x22, 0x6a, 0x1e, 0x04, 0xb4, 0x6b, - 0x3b, 0x4e, 0x9a, 0x86, 0xf4, 0x14, 0x09, 0x89, 0x93, 0xbd, 0x33, 0xbf, 0xf9, 0xef, 0xcc, 0xec, - 0xee, 0xc0, 0xcd, 0xfe, 0x43, 0xae, 0x53, 0xd7, 0xe8, 0x07, 0x6d, 0xc2, 0x1c, 0xe2, 0x13, 0x6e, - 0x0c, 0x88, 0xd3, 0x71, 0x99, 0x11, 0x3b, 0xb0, 0x47, 0x0d, 0xd6, 0xc6, 0xa6, 0x31, 0x58, 0xc7, - 0x96, 0xd7, 0xc3, 0xeb, 0x46, 0x97, 0x38, 0x84, 0x61, 0x9f, 0x74, 0x74, 0x8f, 0xb9, 0xbe, 0xab, - 0xa8, 0x11, 0xa9, 0x63, 0x8f, 0xea, 0x82, 0xd4, 0x13, 0x72, 0xe5, 0x76, 0x97, 0xfa, 0xbd, 0xa0, - 0xad, 0x9b, 0xae, 0x6d, 0x74, 0xdd, 0xae, 0x6b, 0xc8, 0x80, 0x76, 0x70, 0x20, 0x57, 0x72, 0x21, - 0xff, 0x22, 0xa1, 0x95, 0x7b, 0xe9, 0x96, 0x36, 0x36, 0x7b, 0xd4, 0x21, 0xec, 0xd0, 0xf0, 0xfa, - 0x5d, 0x61, 0xe0, 0x86, 0x4d, 0x7c, 0x6c, 0x0c, 0xa6, 0xb6, 0x5f, 0x31, 0x66, 0x45, 0xb1, 0xc0, - 0xf1, 0xa9, 0x4d, 0xa6, 0x02, 0xee, 0xcf, 0x0b, 0xe0, 0x66, 0x8f, 0xd8, 0x78, 0x2a, 0xee, 0xee, - 0xac, 0xb8, 0xc0, 0xa7, 0x96, 0x41, 0x1d, 0x9f, 0xfb, 0xec, 0x6c, 0x50, 0xe3, 0x1b, 0x80, 0xe5, - 0x2d, 0x2b, 0xe0, 0x3e, 0x61, 0xc8, 0xb5, 0x88, 0xf2, 0x06, 0x16, 0x45, 0x21, 0x1d, 0xec, 0x63, - 0x15, 0xd4, 0xc1, 0x6a, 0x79, 0xe3, 0x8e, 0x9e, 0xf6, 0x6f, 0xa4, 0xab, 0x7b, 0xfd, 0xae, 0x30, - 0x70, 0x5d, 0xd0, 0xfa, 0x60, 0x5d, 0xdf, 0x6d, 0xbf, 0x25, 0xa6, 0xff, 0x8c, 0xf8, 0xb8, 0xa9, - 0x1c, 0x0d, 0x6b, 0x99, 0x70, 0x58, 0x83, 0xa9, 0x0d, 0x8d, 0x54, 0x95, 0x16, 0x2c, 0xb0, 0xc0, - 0x22, 0x5c, 0xcd, 0xd6, 0x73, 0xab, 0xe5, 0x8d, 0x1b, 0xfa, 0xac, 0xe3, 0xd1, 0xf7, 0x5c, 0x8b, - 0x9a, 0x87, 0x28, 0xb0, 0x48, 0xb3, 0x12, 0x4b, 0x16, 0xc4, 0x8a, 0xa3, 0x48, 0xa1, 0xf1, 0x29, - 0x0b, 0x95, 0xb1, 0xe4, 0x9b, 0xd4, 0xe9, 0x50, 0xa7, 0xbb, 0x80, 0x1a, 0x76, 0x61, 0x91, 0x07, - 0xd2, 0x91, 0x94, 0x71, 0x7d, 0x76, 0x19, 0x2f, 0x22, 0xb2, 0x79, 0x25, 0x96, 0x2c, 0xc6, 0x06, - 0x8e, 0x46, 0x22, 0xca, 0x36, 0x5c, 0x62, 0xae, 0x45, 0x10, 0x39, 0x50, 0x73, 0x32, 0xe3, 0xbf, - 0xe8, 0xa1, 0x08, 0x6c, 0x56, 0x63, 0xbd, 0xa5, 0xd8, 0x80, 0x12, 0x89, 0xc6, 0x0f, 0x00, 0xaf, - 0x4d, 0xf7, 0x65, 0x9b, 0x72, 0x5f, 0x79, 0x3d, 0xd5, 0x1b, 0xfd, 0x62, 0xbd, 0x11, 0xd1, 0xb2, - 0x33, 0xa3, 0x32, 0x12, 0xcb, 0x58, 0x5f, 0x9e, 0xc3, 0x02, 0xf5, 0x89, 0x9d, 0x34, 0x65, 0x6d, - 0x76, 0x11, 0xd3, 0xe9, 0xa5, 0x67, 0xdc, 0x12, 0x12, 0x28, 0x52, 0x6a, 0x7c, 0x07, 0xb0, 0x3a, - 0x06, 0x2f, 0xa0, 0x88, 0x27, 0x93, 0x45, 0xdc, 0xbc, 0x58, 0x11, 0xe7, 0x67, 0xff, 0x1b, 0x40, - 0x98, 0x5e, 0x63, 0xa5, 0x06, 0x0b, 0x03, 0xc2, 0xda, 0x5c, 0x05, 0xf5, 0xdc, 0x6a, 0xa9, 0x59, - 0x12, 0xfc, 0xbe, 0x30, 0xa0, 0xc8, 0xae, 0xdc, 0x82, 0x25, 0xec, 0xd1, 0xc7, 0xcc, 0x0d, 0x3c, - 0xae, 0xe6, 0x24, 0x54, 0x09, 0x87, 0xb5, 
0xd2, 0xe6, 0x5e, 0x2b, 0x32, 0xa2, 0xd4, 0x2f, 0x60, - 0x46, 0xb8, 0x1b, 0x30, 0x93, 0x70, 0x35, 0x9f, 0xc2, 0x28, 0x31, 0xa2, 0xd4, 0xaf, 0x3c, 0x80, - 0x95, 0x64, 0xb1, 0x83, 0x6d, 0xc2, 0xd5, 0x82, 0x0c, 0x58, 0x0e, 0x87, 0xb5, 0x0a, 0x1a, 0x77, - 0xa0, 0x49, 0x4e, 0x79, 0x04, 0xab, 0x8e, 0xeb, 0x24, 0xc8, 0x4b, 0xb4, 0xcd, 0xd5, 0x4b, 0x32, - 0xf4, 0x6a, 0x38, 0xac, 0x55, 0x77, 0x26, 0x5d, 0xe8, 0x2c, 0xdb, 0xf8, 0x0a, 0x60, 0xfe, 0xdf, - 0x9b, 0x2c, 0x1f, 0xb2, 0xb0, 0xfc, 0x7f, 0xa4, 0x8c, 0x8d, 0x14, 0xf1, 0x0c, 0x17, 0x3b, 0x4b, - 0x2e, 0xfe, 0x0c, 0xe7, 0x0f, 0x91, 0xcf, 0x00, 0x16, 0x17, 0x34, 0x3d, 0xb6, 0x26, 0xd3, 0xd6, - 0xe6, 0xa4, 0x7d, 0x7e, 0xbe, 0xef, 0x60, 0x72, 0x02, 0xca, 0x1a, 0x2c, 0x26, 0x2f, 0x5e, 0x66, - 0x5b, 0x4a, 0x77, 0x4f, 0x86, 0x02, 0x1a, 0x11, 0x4a, 0x1d, 0xe6, 0xfb, 0xd4, 0xe9, 0xa8, 0x59, - 0x49, 0x5e, 0x8e, 0xc9, 0xfc, 0x53, 0xea, 0x74, 0x90, 0xf4, 0x08, 0xc2, 0xc1, 0x36, 0x91, 0x77, - 0x62, 0x8c, 0x10, 0x6f, 0x1d, 0x49, 0x4f, 0xe3, 0x0b, 0x80, 0x4b, 0xf1, 0x7d, 0x1a, 0xe9, 0x81, - 0x99, 0x7a, 0x1b, 0x10, 0x62, 0x8f, 0xee, 0x13, 0xc6, 0xa9, 0xeb, 0xc4, 0xfb, 0x8e, 0x6e, 0xfa, - 0xe6, 0x5e, 0x2b, 0xf6, 0xa0, 0x31, 0x6a, 0x7e, 0x0e, 0x8a, 0x01, 0x4b, 0xe2, 0xcb, 0x3d, 0x6c, - 0x12, 0x35, 0x2f, 0xb1, 0xe5, 0x18, 0x2b, 0xed, 0x24, 0x0e, 0x94, 0x32, 0x4d, 0xfd, 0xe8, 0x54, - 0xcb, 0x1c, 0x9f, 0x6a, 0x99, 0x93, 0x53, 0x2d, 0xf3, 0x3e, 0xd4, 0xc0, 0x51, 0xa8, 0x81, 0xe3, - 0x50, 0x03, 0x27, 0xa1, 0x06, 0x7e, 0x86, 0x1a, 0xf8, 0xf8, 0x4b, 0xcb, 0xbc, 0x2a, 0x26, 0xcd, - 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x38, 0x05, 0x46, 0x58, 0x0a, 0x00, 0x00, + // 844 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x65, 0x15, 0x6e, 0x38, 0x21, 0x6b, 0x85, 0x9c, 0xc5, 0x02, + 0xe9, 0x10, 0x87, 0xcd, 0xee, 0x21, 0xa0, 0xa1, 0x58, 0x5f, 0x81, 0x16, 0x96, 0xbd, 0x65, 0x4e, + 0x5c, 0x81, 0x28, 0x98, 0x38, 0x73, 0xce, 0x10, 0xdb, 0x63, 0xcd, 0x8c, 0x23, 0x9d, 0x68, 0x68, + 0x68, 0x11, 0x0d, 0x05, 0x3d, 0x2d, 0x0d, 0x94, 0xfc, 0x03, 0x4b, 0x77, 0xe5, 0x56, 0x11, 0x6b, + 0xfe, 0x10, 0x90, 0xc7, 0x76, 0xec, 0xfc, 0x22, 0xa9, 0x22, 0x21, 0x51, 0x25, 0xf3, 0xde, 0xf7, + 0xbe, 0xf7, 0xde, 0x37, 0xf3, 0x9e, 0xe1, 0xd9, 0xf8, 0x03, 0x69, 0x33, 0xee, 0x8c, 0x93, 0x01, + 0x15, 0x11, 0x55, 0x54, 0x3a, 0x13, 0x1a, 0x0d, 0xb9, 0x70, 0x0a, 0x07, 0x89, 0x99, 0x23, 0x06, + 0xc4, 0x73, 0x26, 0x27, 0x24, 0x88, 0x47, 0xe4, 0xc4, 0xf1, 0x69, 0x44, 0x05, 0x51, 0x74, 0x68, + 0xc7, 0x82, 0x2b, 0x8e, 0x8c, 0x1c, 0x69, 0x93, 0x98, 0xd9, 0x19, 0xd2, 0x2e, 0x91, 0x47, 0x6f, + 0xfb, 0x4c, 0x8d, 0x92, 0x81, 0xed, 0xf1, 0xd0, 0xf1, 0xb9, 0xcf, 0x1d, 0x1d, 0x30, 0x48, 0x9e, + 0xea, 0x93, 0x3e, 0xe8, 0x7f, 0x39, 0xd1, 0xd1, 0xbb, 0x55, 0xca, 0x90, 0x78, 0x23, 0x16, 0x51, + 0xf1, 0xcc, 0x89, 0xc7, 0x7e, 0x66, 0x90, 0x4e, 0x48, 0x15, 0x71, 0x26, 0x4b, 0xe9, 0x8f, 0x9c, + 0x75, 0x51, 0x22, 0x89, 0x14, 0x0b, 0xe9, 0x52, 0xc0, 0x7b, 0x9b, 0x02, 0xa4, 0x37, 0xa2, 0x21, + 0x59, 0x8a, 0x7b, 0xb0, 0x2e, 0x2e, 0x51, 0x2c, 0x70, 0x58, 0xa4, 0xa4, 0x12, 0x8b, 0x41, 0xd6, + 0x4f, 0x00, 0xf6, 0xce, 0x7c, 0x5f, 0x50, 0x9f, 0x28, 0xc6, 0x23, 0x9c, 0x04, 0x14, 0x7d, 0x07, + 0xe0, 0x5d, 0x2f, 0x48, 0xa4, 0xa2, 0x02, 0xf3, 0x80, 0x3e, 0xa6, 0x01, 0xf5, 0x14, 0x17, 0xd2, + 0x00, 0xc7, 0x7b, 0xf7, 0x0e, 0x4e, 0x1f, 0xd8, 0x95, 0xa0, 0xb3, 0x44, 0x76, 0x3c, 0xf6, 0x33, + 0x83, 0xb4, 0x33, 0x1d, 0xec, 0xc9, 0x89, 0x7d, 0x41, 0x06, 0x34, 0x28, 0x63, 0xdd, 0x57, 0xaf, + 0xa7, 0xfd, 0x46, 0x3a, 0xed, 
0xdf, 0x7d, 0xb8, 0x82, 0x18, 0xaf, 0x4c, 0x67, 0xfd, 0xdc, 0x84, + 0x07, 0x35, 0x38, 0xfa, 0x0a, 0x76, 0x32, 0xf2, 0x21, 0x51, 0xc4, 0x00, 0xc7, 0xe0, 0xde, 0xc1, + 0xe9, 0x3b, 0xdb, 0x95, 0xf2, 0x68, 0xf0, 0x35, 0xf5, 0xd4, 0xa7, 0x54, 0x11, 0x17, 0x15, 0x75, + 0xc0, 0xca, 0x86, 0x67, 0xac, 0xe8, 0x1c, 0xb6, 0x45, 0x12, 0x50, 0x69, 0x34, 0x75, 0xa7, 0xaf, + 0xdb, 0xeb, 0x9e, 0x8e, 0x7d, 0xc5, 0x03, 0xe6, 0x3d, 0xcb, 0xe4, 0x72, 0x0f, 0x0b, 0xca, 0x76, + 0x76, 0x92, 0x38, 0x67, 0x40, 0x23, 0xd8, 0x23, 0xf3, 0xba, 0x1a, 0x7b, 0xba, 0xe6, 0x37, 0xd7, + 0x93, 0x2e, 0x5c, 0x84, 0xfb, 0x72, 0x3a, 0xed, 0x2f, 0xde, 0x0e, 0x5e, 0xa4, 0xb5, 0x7e, 0x6c, + 0x42, 0x54, 0x93, 0xc9, 0x65, 0xd1, 0x90, 0x45, 0xfe, 0x0e, 0xd4, 0x7a, 0x04, 0x3b, 0x32, 0xd1, + 0x8e, 0x52, 0xb0, 0xd7, 0xd6, 0xf7, 0xf6, 0x38, 0x47, 0xba, 0x2f, 0x15, 0x94, 0x9d, 0xc2, 0x20, + 0xf1, 0x8c, 0x04, 0x5d, 0xc0, 0x7d, 0xc1, 0x03, 0x8a, 0xe9, 0xd3, 0x42, 0xab, 0x7f, 0xe1, 0xc3, + 0x39, 0xd0, 0xed, 0x15, 0x7c, 0xfb, 0x85, 0x01, 0x97, 0x14, 0xd6, 0x1f, 0x00, 0xbe, 0xb2, 0xac, + 0xcb, 0x05, 0x93, 0x0a, 0x7d, 0xb9, 0xa4, 0x8d, 0xbd, 0xe5, 0xa3, 0x66, 0x32, 0x57, 0x66, 0xd6, + 0x46, 0x69, 0xa9, 0xe9, 0xf2, 0x19, 0x6c, 0x33, 0x45, 0xc3, 0x52, 0x94, 0xfb, 0xeb, 0x9b, 0x58, + 0x2e, 0xaf, 0x7a, 0x4d, 0xe7, 0x19, 0x05, 0xce, 0x99, 0xac, 0xdf, 0x01, 0xec, 0xd5, 0xc0, 0x3b, + 0x68, 0xe2, 0xe3, 0xf9, 0x26, 0xde, 0xd8, 0xae, 0x89, 0xd5, 0xd5, 0xff, 0x0d, 0x20, 0xac, 0x06, + 0x06, 0xf5, 0x61, 0x7b, 0x42, 0xc5, 0x20, 0xdf, 0x27, 0x5d, 0xb7, 0x9b, 0xe1, 0x9f, 0x64, 0x06, + 0x9c, 0xdb, 0xd1, 0x5b, 0xb0, 0x4b, 0x62, 0xf6, 0x91, 0xe0, 0x49, 0x2c, 0x8d, 0x3d, 0x0d, 0x3a, + 0x4c, 0xa7, 0xfd, 0xee, 0xd9, 0xd5, 0x79, 0x6e, 0xc4, 0x95, 0x3f, 0x03, 0x0b, 0x2a, 0x79, 0x22, + 0x3c, 0x2a, 0x8d, 0x56, 0x05, 0xc6, 0xa5, 0x11, 0x57, 0x7e, 0xf4, 0x3e, 0x3c, 0x2c, 0x0f, 0x97, + 0x24, 0xa4, 0xd2, 0x68, 0xeb, 0x80, 0x3b, 0xe9, 0xb4, 0x7f, 0x88, 0xeb, 0x0e, 0x3c, 0x8f, 0x43, + 0x1f, 0xc2, 0x5e, 0xc4, 0xa3, 0x12, 0xf2, 0x39, 0xbe, 0x90, 0xc6, 0x0b, 0x3a, 0x54, 0xcf, 0xe8, + 0xe5, 0xbc, 0x0b, 0x2f, 0x62, 0xad, 0xdf, 0x00, 0x6c, 0xfd, 0xe7, 0x76, 0x98, 0xf5, 0x7d, 0x13, + 0x1e, 0xfc, 0xbf, 0x52, 0x6a, 0x2b, 0x25, 0x1b, 0xc3, 0xdd, 0xee, 0x92, 0xed, 0xc7, 0x70, 0xf3, + 0x12, 0xf9, 0x05, 0xc0, 0xce, 0x8e, 0xb6, 0xc7, 0xc3, 0xf9, 0xb2, 0xcd, 0x0d, 0x65, 0xaf, 0xae, + 0xf7, 0x1b, 0x58, 0xde, 0x00, 0xba, 0x0f, 0x3b, 0xe5, 0xc4, 0xeb, 0x6a, 0xbb, 0x55, 0xf6, 0x72, + 0x29, 0xe0, 0x19, 0x02, 0x1d, 0xc3, 0xd6, 0x98, 0x45, 0x43, 0xa3, 0xa9, 0x91, 0x2f, 0x16, 0xc8, + 0xd6, 0x27, 0x2c, 0x1a, 0x62, 0xed, 0xc9, 0x10, 0x11, 0x09, 0xf3, 0x4f, 0x72, 0x0d, 0x91, 0xcd, + 0x3a, 0xd6, 0x1e, 0xeb, 0x57, 0x00, 0xf7, 0x8b, 0xf7, 0x34, 0xe3, 0x03, 0x6b, 0xf9, 0x4e, 0x21, + 0x24, 0x31, 0x7b, 0x42, 0x85, 0x64, 0x3c, 0x2a, 0xf2, 0xce, 0x5e, 0xfa, 0xd9, 0xd5, 0x79, 0xe1, + 0xc1, 0x35, 0xd4, 0xe6, 0x1a, 0x90, 0x03, 0xbb, 0xd9, 0xaf, 0x8c, 0x89, 0x47, 0x8d, 0x96, 0x86, + 0xdd, 0x29, 0x60, 0xdd, 0xcb, 0xd2, 0x81, 0x2b, 0x8c, 0x6b, 0x5f, 0xdf, 0x9a, 0x8d, 0xe7, 0xb7, + 0x66, 0xe3, 0xe6, 0xd6, 0x6c, 0x7c, 0x9b, 0x9a, 0xe0, 0x3a, 0x35, 0xc1, 0xf3, 0xd4, 0x04, 0x37, + 0xa9, 0x09, 0xfe, 0x4c, 0x4d, 0xf0, 0xc3, 0x5f, 0x66, 0xe3, 0x8b, 0x4e, 0x29, 0xfe, 0x3f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xeb, 0xcc, 0xe2, 0x61, 0x5e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 950995f4..28a4ae3d 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -29,6 +29,14 @@ import 
"k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +45,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -181,3 +195,4 @@ message Subject { // +optional optional string namespace = 4; } + diff --git a/vendor/k8s.io/api/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go index 9cfd71f4..0c697768 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/register.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 06fa6ce8..843d998e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -172,6 +172,20 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index d58a722a..e56cd0f1 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index e4cab832..abbb994f 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index bb7d47df..4b77c9c6 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 9cb4935c..8cb2c4be 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this 
*AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,52 +2693,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 751 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xc7, 0xe3, 0x7c, 0x28, 0xf1, 0xe4, 0x46, 0xb9, 0xf5, 0x95, 0xee, 0xb5, 0x2a, 0x5d, 0x27, - 0x0a, 0x2c, 0x2a, 0x95, 0xda, 0xb4, 0x20, 0x60, 0x83, 0x04, 0x66, 0x01, 0x55, 0x4b, 0xa8, 0x06, - 0xc1, 0x02, 0xb1, 0x60, 0xe2, 0x4c, 0xd3, 0x21, 0xf1, 0x87, 0x66, 0xc6, 0x91, 0x2a, 0x36, 0x3c, - 0x00, 0x0b, 0x24, 0x5e, 0x83, 0x15, 0x3b, 0x78, 0x82, 0x2c, 0xbb, 0xec, 0x2a, 0xa2, 0xe6, 0x41, - 0x40, 0x33, 0xb6, 0xe3, 0xa4, 0x69, 0xda, 0xac, 0x22, 0x21, 0xb1, 0x4a, 0xe6, 0x9c, 0xdf, 0xf9, - 0x9f, 0x0f, 0xcf, 0x1c, 0xf0, 0xa0, 0x7f, 0x8f, 0x99, 0xc4, 0xb7, 0xfa, 0x61, 0x07, 0x53, 0x0f, - 0x73, 0xcc, 0xac, 0x21, 0xf6, 0xba, 0x3e, 0xb5, 0x12, 0x07, 0x0a, 0x88, 0x45, 0x3b, 0xc8, 0xb1, - 0x86, 0xdb, 0x1d, 0xcc, 0xd1, 0xb6, 0xd5, 0xc3, 0x1e, 0xa6, 0x88, 0xe3, 0xae, 0x19, 0x50, 0x9f, - 0xfb, 0xda, 0x7f, 0x31, 0x68, 0xa2, 0x80, 0x98, 0x02, 0x34, 0x13, 0x70, 0x7d, 0xab, 0x47, 0xf8, - 0x51, 0xd8, 0x31, 0x1d, 0xdf, 0xb5, 0x7a, 0x7e, 0xcf, 0xb7, 0x24, 0xdf, 0x09, 0x0f, 0xe5, 0x49, - 0x1e, 0xe4, 0xbf, 0x58, 0x67, 0xfd, 0x76, 0x96, 0xd0, 0x45, 0xce, 0x11, 0xf1, 0x30, 0x3d, 0xb6, - 0x82, 0x7e, 0x4f, 0x18, 0x98, 0xe5, 0x62, 0x8e, 0xac, 0xe1, 0x5c, 0xf6, 0x75, 0x6b, 0x51, 0x14, - 0x0d, 0x3d, 0x4e, 0x5c, 0x3c, 0x17, 0x70, 0xe7, 0xaa, 0x00, 0xe6, 0x1c, 0x61, 0x17, 0xcd, 0xc5, - 0xdd, 0x5a, 0x14, 0x17, 0x72, 0x32, 0xb0, 0x88, 0xc7, 0x19, 0xa7, 0xe7, 0x83, 0x5a, 0x5f, 0x15, - 0x50, 0x7d, 0x34, 0x08, 0x19, 0xc7, 0x14, 0xfa, 0x03, 0xac, 0xbd, 0x01, 0x15, 0xd1, 0x48, 0x17, - 0x71, 0xa4, 0x2b, 0x4d, 0x65, 0xa3, 0xba, 0x73, 0xd3, 0xcc, 0xc6, 0x37, 0xd1, 0x35, 0x83, 0x7e, - 0x4f, 0x18, 0x98, 0x29, 0x68, 0x73, 0xb8, 0x6d, 0x3e, 0xeb, 0xbc, 0xc5, 0x0e, 0x7f, 0x8a, 0x39, - 0xb2, 0xb5, 0xd1, 0xb8, 0x91, 0x8b, 0xc6, 0x0d, 0x90, 0xd9, 0xe0, 0x44, 0x55, 0x7b, 0x02, 0x4a, - 0x34, 0x1c, 0x60, 0xa6, 0xe7, 0x9b, 0x85, 0x8d, 0xea, 0xce, 0x35, 0x73, 0xc1, 0xd7, 0x31, 0x0f, - 0xfc, 0x01, 0x71, 0x8e, 0x61, 0x38, 0xc0, 0x76, 0x2d, 0x51, 0x2c, 0x89, 0x13, 0x83, 0xb1, 0x40, - 0xeb, 0x53, 0x1e, 0x68, 0x53, 0xb5, 0xdb, 0xc4, 0xeb, 0x12, 0xaf, 0xb7, 0x82, 0x16, 0xda, 0xa0, - 0xc2, 0x42, 0xe9, 0x48, 0xbb, 0x68, 0x2e, 0xec, 0xe2, 0x79, 0x0c, 0xda, 0x7f, 0x27, 0x8a, 0x95, - 0xc4, 0xc0, 0xe0, 0x44, 0x43, 0xdb, 0x03, 0x65, 0xea, 0x0f, 0x30, 0xc4, 0x87, 0x7a, 0x41, 0x16, - 0xbc, 0x58, 0x0e, 0xc6, 0x9c, 0x5d, 0x4f, 0xe4, 0xca, 0x89, 0x01, 0xa6, 0x0a, 0xad, 0x91, 0x02, - 0xfe, 0x9d, 0x9f, 0xca, 0x3e, 0x61, 0x5c, 0x7b, 0x3d, 0x37, 0x19, 0x73, 0xb9, 0xc9, 0x88, 0x68, - 0x39, 0x97, 0x49, 0x17, 0xa9, 0x65, 0x6a, 0x2a, 0x07, 0xa0, 0x44, 0x38, 0x76, 0xd3, 0x91, 0x6c, - 0x2e, 0xec, 0x61, 0xbe, 0xba, 0xec, 0x03, 0xef, 0x0a, 0x05, 0x18, 0x0b, 0xb5, 0xbe, 0x29, 0xa0, - 0x3e, 0x05, 0xaf, 0xa0, 0x87, 0xdd, 0xd9, 0x1e, 0xae, 0x2f, 0xd5, 0xc3, 0xc5, 0xc5, 0xff, 0x54, - 0x00, 0xc8, 0xae, 0xb0, 0xd6, 0x00, 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x2b, 0xcd, 0xc2, 0x86, 0x6a, - 0xab, 0x82, 0x7f, 0x29, 0x0c, 0x30, 0xb6, 0x6b, 0x9b, 0x40, 0x45, 0x01, 0x79, 0x4c, 0xfd, 0x30, - 0x88, 0xd3, 0xab, 0x76, 0x2d, 0x1a, 0x37, 
0xd4, 0x87, 0x07, 0xbb, 0xb1, 0x11, 0x66, 0x7e, 0x01, - 0x53, 0xcc, 0xfc, 0x90, 0x3a, 0x98, 0xe9, 0x85, 0x0c, 0x86, 0xa9, 0x11, 0x66, 0x7e, 0xed, 0x2e, - 0xa8, 0xa5, 0x87, 0x36, 0x72, 0x31, 0xd3, 0x8b, 0x32, 0x60, 0x2d, 0x1a, 0x37, 0x6a, 0x70, 0xda, - 0x01, 0x67, 0x39, 0xed, 0x3e, 0xa8, 0x7b, 0xbe, 0x97, 0x22, 0x2f, 0xe0, 0x3e, 0xd3, 0x4b, 0x32, - 0xf4, 0x9f, 0x68, 0xdc, 0xa8, 0xb7, 0x67, 0x5d, 0xf0, 0x3c, 0xdb, 0xfa, 0xa2, 0x80, 0xe2, 0x6f, - 0xb7, 0x54, 0x3e, 0xe4, 0x41, 0xf5, 0xcf, 0x36, 0x99, 0x6c, 0x13, 0xf1, 0x04, 0x57, 0xbb, 0x46, - 0x96, 0x7e, 0x82, 0x57, 0xef, 0x8f, 0xcf, 0x0a, 0xa8, 0xac, 0x68, 0x71, 0xd8, 0xb3, 0x55, 0xff, - 0x7f, 0x79, 0xd5, 0x17, 0x97, 0xfb, 0x0e, 0xa4, 0xf3, 0xd7, 0x6e, 0x80, 0x4a, 0xfa, 0xd8, 0x65, - 0xb1, 0x6a, 0x96, 0x3c, 0xdd, 0x07, 0x70, 0x42, 0x68, 0x4d, 0x50, 0xec, 0x13, 0xaf, 0xab, 0xe7, - 0x25, 0xf9, 0x57, 0x42, 0x16, 0xf7, 0x88, 0xd7, 0x85, 0xd2, 0x23, 0x08, 0x0f, 0xb9, 0x58, 0x5e, - 0x88, 0x29, 0x42, 0x3c, 0x73, 0x28, 0x3d, 0x62, 0x56, 0xe5, 0xe4, 0x32, 0x4d, 0xf4, 0x94, 0x85, - 0x7a, 0xd3, 0xf5, 0xe5, 0x97, 0xa9, 0xef, 0xf2, 0xec, 0x9a, 0x05, 0x54, 0xf1, 0xcb, 0x02, 0xe4, - 0x60, 0xbd, 0x28, 0xb1, 0xb5, 0x04, 0x53, 0xdb, 0xa9, 0x03, 0x66, 0x8c, 0xbd, 0x35, 0x3a, 0x33, - 0x72, 0x27, 0x67, 0x46, 0xee, 0xf4, 0xcc, 0xc8, 0xbd, 0x8f, 0x0c, 0x65, 0x14, 0x19, 0xca, 0x49, - 0x64, 0x28, 0xa7, 0x91, 0xa1, 0x7c, 0x8f, 0x0c, 0xe5, 0xe3, 0x0f, 0x23, 0xf7, 0xaa, 0x9c, 0x4c, - 0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x24, 0x6a, 0xfa, 0x45, 0x0a, 0x00, 0x00, + // 833 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xdc, 0x70, 0x02, 0x6b, 0x05, 0xce, 0x2a, 0x50, + 0x44, 0x3a, 0xce, 0x66, 0xef, 0x10, 0xd0, 0x20, 0x71, 0xa6, 0x80, 0xd5, 0x2d, 0x61, 0x35, 0x27, + 0x28, 0x10, 0x05, 0x63, 0x67, 0xce, 0x19, 0xe2, 0x5f, 0x9a, 0x19, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x51, 0x20, 0x51, 0xd1, 0x52, 0x53, 0x51, 0xf2, 0x17, 0xa4, 0xbc, 0xf2, 0xaa, 0x88, 0x35, 0x7f, + 0x08, 0x68, 0xfc, 0x23, 0x4e, 0xe2, 0xf8, 0x2e, 0x55, 0x24, 0xa4, 0xab, 0x76, 0xe7, 0xbd, 0xef, + 0x7d, 0xef, 0x7b, 0x9f, 0x67, 0x5e, 0xe0, 0x27, 0xb3, 0x8f, 0x84, 0xc9, 0x22, 0x6b, 0x96, 0x38, + 0x94, 0x87, 0x54, 0x52, 0x61, 0xcd, 0x69, 0x38, 0x89, 0xb8, 0x55, 0x24, 0x48, 0xcc, 0x2c, 0xee, + 0x10, 0xd7, 0x9a, 0x5f, 0x38, 0x54, 0x92, 0x0b, 0xcb, 0xa3, 0x21, 0xe5, 0x44, 0xd2, 0x89, 0x19, + 0xf3, 0x48, 0x46, 0xe8, 0x8d, 0x1c, 0x68, 0x92, 0x98, 0x99, 0x0a, 0x68, 0x16, 0xc0, 0xb3, 0xbb, + 0x1e, 0x93, 0xd3, 0xc4, 0x31, 0xdd, 0x28, 0xb0, 0xbc, 0xc8, 0x8b, 0xac, 0x0c, 0xef, 0x24, 0x8f, + 0xb3, 0x53, 0x76, 0xc8, 0xfe, 0xcb, 0x79, 0xce, 0x46, 0xf5, 0x86, 0xc4, 0x8f, 0xa7, 0xf5, 0x8e, + 0x67, 0xef, 0x57, 0xc8, 0x80, 0xb8, 0x53, 0x16, 0x52, 0xfe, 0xc4, 0x8a, 0x67, 0x9e, 0x0a, 0x08, + 0x2b, 0xa0, 0x92, 0x58, 0xf3, 0x7a, 0x95, 0xd5, 0x54, 0xc5, 0x93, 0x50, 0xb2, 0x80, 0xd6, 0x0a, + 0x3e, 0x78, 0x51, 0x81, 0x70, 0xa7, 0x34, 0x20, 0xb5, 0xba, 0xfb, 0x4d, 0x75, 0x89, 0x64, 0xbe, + 0xc5, 0x42, 0x29, 0x24, 0xdf, 0x2e, 0x1a, 0xfe, 0x06, 0x60, 0xff, 0x81, 0xe7, 0x71, 0xea, 0x11, + 0xc9, 0xa2, 0x10, 0x27, 0x3e, 0x45, 0x3f, 0x01, 0x78, 0xdb, 0xf5, 0x13, 0x21, 0x29, 0xc7, 0x91, + 0x4f, 0x1f, 0x51, 0x9f, 0xba, 0x32, 0xe2, 0x42, 0x07, 0xe7, 0x47, 0xa3, 0x93, 0x7b, 0xf7, 0xcd, + 0xca, 0xf9, 0x55, 0x23, 0x33, 0x9e, 0x79, 0x2a, 0x20, 0x4c, 0xe5, 0x83, 0x39, 0xbf, 0x30, 0xaf, + 0x88, 0x43, 0xfd, 0xb2, 0xd6, 0x7e, 0x73, 0xb1, 0x1c, 0xb4, 0xd2, 0xe5, 0xe0, 0xf6, 0xa7, 0x3b, + 0x88, 0xf1, 0xce, 0x76, 
0xc3, 0xdf, 0xdb, 0xf0, 0x64, 0x0d, 0x8e, 0xbe, 0x83, 0x3d, 0x45, 0x3e, + 0x21, 0x92, 0xe8, 0xe0, 0x1c, 0x8c, 0x4e, 0xee, 0xbd, 0xb7, 0x9f, 0x94, 0x2f, 0x9d, 0xef, 0xa9, + 0x2b, 0xbf, 0xa0, 0x92, 0xd8, 0xa8, 0xd0, 0x01, 0xab, 0x18, 0x5e, 0xb1, 0xa2, 0xcf, 0x61, 0x97, + 0x27, 0x3e, 0x15, 0x7a, 0x3b, 0x9b, 0xf4, 0x6d, 0xb3, 0xe1, 0x8e, 0x99, 0xd7, 0x91, 0xcf, 0xdc, + 0x27, 0xca, 0x2d, 0xfb, 0xb4, 0x60, 0xec, 0xaa, 0x93, 0xc0, 0x39, 0x01, 0xf2, 0x60, 0x9f, 0x6c, + 0xda, 0xaa, 0x1f, 0x65, 0x92, 0x47, 0x8d, 0x9c, 0x5b, 0x9f, 0xc1, 0x7e, 0x2d, 0x5d, 0x0e, 0xb6, + 0xbf, 0x0d, 0xde, 0x66, 0x1d, 0xfe, 0xda, 0x86, 0x68, 0xcd, 0x24, 0x9b, 0x85, 0x13, 0x16, 0x7a, + 0x07, 0xf0, 0x6a, 0x0c, 0x7b, 0x22, 0xc9, 0x12, 0xa5, 0x5d, 0xe7, 0x8d, 0xa3, 0x3d, 0xca, 0x81, + 0xf6, 0xab, 0x05, 0x63, 0xaf, 0x08, 0x08, 0xbc, 0xe2, 0x40, 0x0f, 0xe1, 0x31, 0x8f, 0x7c, 0x8a, + 0xe9, 0xe3, 0xc2, 0xa9, 0x66, 0x3a, 0x9c, 0xe3, 0xec, 0x7e, 0x41, 0x77, 0x5c, 0x04, 0x70, 0xc9, + 0x30, 0x5c, 0x00, 0xf8, 0x7a, 0xdd, 0x95, 0x2b, 0x26, 0x24, 0xfa, 0xb6, 0xe6, 0x8c, 0xb9, 0xe7, + 0x85, 0x66, 0x22, 0xf7, 0x65, 0x35, 0x45, 0x19, 0x59, 0x73, 0xe5, 0x1a, 0x76, 0x99, 0xa4, 0x41, + 0x69, 0xc9, 0x9d, 0xc6, 0x19, 0xea, 0xea, 0xaa, 0x9b, 0x74, 0xa9, 0x18, 0x70, 0x4e, 0x34, 0xfc, + 0x0b, 0xc0, 0xfe, 0x1a, 0xf8, 0x00, 0x33, 0x5c, 0x6e, 0xce, 0xf0, 0xce, 0x5e, 0x33, 0xec, 0x16, + 0xff, 0x2f, 0x80, 0xb0, 0x7a, 0x2b, 0x68, 0x00, 0xbb, 0x73, 0xca, 0x9d, 0x7c, 0x93, 0x68, 0xb6, + 0xa6, 0xf0, 0x5f, 0xab, 0x00, 0xce, 0xe3, 0xe8, 0x0e, 0xd4, 0x48, 0xcc, 0x3e, 0xe3, 0x51, 0x12, + 0xe7, 0xed, 0x35, 0xfb, 0x34, 0x5d, 0x0e, 0xb4, 0x07, 0xd7, 0x97, 0x79, 0x10, 0x57, 0x79, 0x05, + 0xe6, 0x54, 0x44, 0x09, 0x77, 0xa9, 0xd0, 0x8f, 0x2a, 0x30, 0x2e, 0x83, 0xb8, 0xca, 0xa3, 0x0f, + 0xe1, 0x69, 0x79, 0x18, 0x93, 0x80, 0x0a, 0xbd, 0x93, 0x15, 0xdc, 0x4a, 0x97, 0x83, 0x53, 0xbc, + 0x9e, 0xc0, 0x9b, 0x38, 0xf4, 0x31, 0xec, 0x87, 0x51, 0x58, 0x42, 0xbe, 0xc2, 0x57, 0x42, 0xef, + 0x66, 0xa5, 0xd9, 0xfb, 0x1c, 0x6f, 0xa6, 0xf0, 0x36, 0x76, 0xf8, 0x27, 0x80, 0x9d, 0xff, 0xdb, + 0xf6, 0x1a, 0xfe, 0xdc, 0x86, 0x27, 0x2f, 0xb7, 0xc9, 0x6a, 0x9b, 0xa8, 0x27, 0x78, 0xd8, 0x35, + 0xb2, 0xf7, 0x13, 0x7c, 0xf1, 0xfe, 0xf8, 0x03, 0xc0, 0xde, 0x81, 0x16, 0x87, 0xbd, 0xa9, 0xfa, + 0xad, 0xe7, 0xab, 0xde, 0x2d, 0xf7, 0x07, 0x58, 0xfa, 0x8f, 0xde, 0x85, 0xbd, 0xf2, 0xb1, 0x67, + 0x62, 0xb5, 0xaa, 0x79, 0xb9, 0x0f, 0xf0, 0x0a, 0x81, 0xce, 0x61, 0x67, 0xc6, 0xc2, 0x89, 0xde, + 0xce, 0x90, 0xaf, 0x14, 0xc8, 0xce, 0x43, 0x16, 0x4e, 0x70, 0x96, 0x51, 0x88, 0x90, 0x04, 0xf9, + 0x0f, 0xf1, 0x1a, 0x42, 0x3d, 0x73, 0x9c, 0x65, 0x94, 0x57, 0xc7, 0xc5, 0x65, 0x5a, 0xf1, 0x81, + 0x46, 0xbe, 0x75, 0x7d, 0xed, 0x7d, 0xf4, 0x3d, 0xbf, 0x3b, 0xb2, 0xa0, 0xa6, 0xfe, 0x8a, 0x98, + 0xb8, 0x54, 0xef, 0x64, 0xb0, 0x5b, 0x05, 0x4c, 0x1b, 0x97, 0x09, 0x5c, 0x61, 0xec, 0xbb, 0x8b, + 0x1b, 0xa3, 0xf5, 0xf4, 0xc6, 0x68, 0x3d, 0xbb, 0x31, 0x5a, 0x3f, 0xa6, 0x06, 0x58, 0xa4, 0x06, + 0x78, 0x9a, 0x1a, 0xe0, 0x59, 0x6a, 0x80, 0xbf, 0x53, 0x03, 0xfc, 0xf2, 0x8f, 0xd1, 0xfa, 0xe6, + 0xb8, 0x70, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x99, 0xaf, 0xff, 0x74, 0x0b, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index fe92c3c0..975de109 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.rbac.v1beta1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; 
import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -180,3 +195,4 @@ message Subject { // +optional optional string namespace = 4; } + diff --git a/vendor/k8s.io/api/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go index 8f60f019..c8526a65 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/register.go +++ b/vendor/k8s.io/api/rbac/v1beta1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index ee3964a3..091fc1dc 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -171,6 +171,19 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 16a265c5..6180d6d4 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 92272764..ac238956 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index bf901598..e10d07ff 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 0f51edaf..625cae7b 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -62,3 +62,4 @@ message PriorityClassList { // items is the list of PriorityClasses repeated PriorityClass items = 2; } + diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go index 91ce6e0c..24689f0a 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/register.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PriorityClass{}, diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index a3ae5c0f..fad4db66 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClass).DeepCopyInto(out.(*PriorityClass)) - return nil - }, InType: reflect.TypeOf(&PriorityClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClassList).DeepCopyInto(out.(*PriorityClassList)) - return nil - }, InType: reflect.TypeOf(&PriorityClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index ae1bfb91..05a62c56 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto index 5550d9a2..430319d7 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -73,3 +73,4 @@ message PodPresetSpec { // +optional repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; } + diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go index 2f39af0f..eee278d9 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/settings/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodPreset{}, diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 07e6ab72..2c925622 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -22,36 +22,9 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPreset).DeepCopyInto(out.(*PodPreset)) - return nil - }, InType: reflect.TypeOf(&PodPreset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetList).DeepCopyInto(out.(*PodPresetList)) - return nil - }, InType: reflect.TypeOf(&PodPresetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetSpec).DeepCopyInto(out.(*PodPresetSpec)) - return nil - }, InType: reflect.TypeOf(&PodPresetSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodPreset) DeepCopyInto(out *PodPreset) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 0672c58e..8f4a4045 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 4befedff..7157b72f 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -146,6 +146,12 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } return i, nil } @@ -242,6 +248,10 @@ func (m *StorageClass) Size() (n int) { if m.AllowVolumeExpansion != nil { n += 2 } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -293,6 +303,7 @@ func (this *StorageClass) String() string { `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, `}`, }, "") return s @@ -600,6 +611,36 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -842,43 +883,44 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 593 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x6f, 0xd3, 0x3e, - 0x18, 0xc6, 0x9b, 0x76, 0xfd, 0x6b, 0x73, 0x37, 0xfd, 0xab, 0x30, 0xa4, 0xa8, 0x87, 0xb4, 0x1a, - 0x97, 0x0a, 0x09, 0x7b, 0xdd, 0x06, 0x9a, 0x90, 0x40, 0xa2, 0x68, 0x12, 0x48, 0x9b, 0x56, 0x05, - 0x89, 0x03, 0xe2, 0x80, 0x9b, 0xbd, 0x64, 0x26, 0x89, 0x1d, 0xd9, 0x4e, 0xa0, 0x37, 0x3e, 0x02, - 0x9f, 0x87, 0x13, 0xc7, 0x1d, 0x77, 0xdc, 0x29, 0x62, 0xe1, 0x5b, 0x70, 0x42, 0x49, 0xca, 0x92, - 0xad, 0x9d, 0xd8, 0xcd, 0x7e, 0xdf, 0xe7, 0xf7, 0xd8, 0x7e, 0xfd, 0xa0, 0xe7, 0xfe, 0xbe, 0xc2, - 0x4c, 0x10, 0x3f, 0x9e, 0x82, 0xe4, 0xa0, 0x41, 0x91, 0x04, 0xf8, 0x89, 0x90, 0x64, 0xde, 0xa0, - 0x11, 0x23, 0x4a, 0x0b, 0x49, 0x3d, 0x20, 0xc9, 0x88, 0x78, 0xc0, 0x41, 0x52, 0x0d, 0x27, 0x38, - 0x92, 0x42, 0x0b, 0xf3, 0x7e, 0x29, 0xc3, 0x34, 0x62, 0x78, 0x2e, 0xc3, 0xc9, 0xa8, 0xf7, 0xc8, - 0x63, 0xfa, 0x34, 0x9e, 0x62, 0x57, 0x84, 0xc4, 0x13, 0x9e, 0x20, 0x85, 0x7a, 0x1a, 0x7f, 0x2c, - 0x76, 0xc5, 0xa6, 0x58, 0x95, 0x2e, 0xbd, 0x87, 0x4b, 0x0f, 0x9b, 0x82, 0xa6, 0x0b, 0x27, 0xf6, - 0xf6, 0x2a, 0x6d, 0x48, 0xdd, 0x53, 0xc6, 0x41, 0xce, 0x48, 0xe4, 0x7b, 0x79, 0x41, 0x91, 0x10, - 0x34, 0x5d, 0x72, 0xcf, 0x1e, 0xb9, 0x8d, 0x92, 0x31, 0xd7, 0x2c, 0x84, 0x05, 0xe0, 0xc9, 0xbf, - 0x00, 0xe5, 0x9e, 0x42, 
0x48, 0x17, 0xb8, 0xdd, 0xdb, 0xb8, 0x58, 0xb3, 0x80, 0x30, 0xae, 0x95, - 0x96, 0x37, 0xa1, 0xad, 0x1f, 0x2b, 0x68, 0xfd, 0x4d, 0xf9, 0xee, 0x97, 0x01, 0x55, 0xca, 0xfc, - 0x80, 0x56, 0xf3, 0x97, 0x9c, 0x50, 0x4d, 0x2d, 0x63, 0x60, 0x0c, 0x3b, 0x3b, 0xdb, 0xb8, 0x9a, - 0xf4, 0x95, 0x31, 0x8e, 0x7c, 0x2f, 0x2f, 0x28, 0x9c, 0xab, 0x71, 0x32, 0xc2, 0xc7, 0xd3, 0x4f, - 0xe0, 0xea, 0x23, 0xd0, 0x74, 0x6c, 0x9e, 0xa5, 0xfd, 0x46, 0x96, 0xf6, 0x51, 0x55, 0x73, 0xae, - 0x5c, 0xcd, 0xc7, 0xa8, 0x13, 0x49, 0x91, 0x30, 0xc5, 0x04, 0x07, 0x69, 0x35, 0x07, 0xc6, 0x70, - 0x6d, 0x7c, 0x6f, 0x8e, 0x74, 0x26, 0x55, 0xcb, 0xa9, 0xeb, 0x4c, 0x0f, 0xa1, 0x88, 0x4a, 0x1a, - 0x82, 0x06, 0xa9, 0xac, 0xd6, 0xa0, 0x35, 0xec, 0xec, 0xec, 0xe2, 0xa5, 0x21, 0xc0, 0xf5, 0x17, - 0xe1, 0xc9, 0x15, 0x75, 0xc0, 0xb5, 0x9c, 0x55, 0xb7, 0xab, 0x1a, 0x4e, 0xcd, 0xda, 0xf4, 0xd1, - 0x86, 0x04, 0x37, 0xa0, 0x2c, 0x9c, 0x88, 0x80, 0xb9, 0x33, 0x6b, 0xa5, 0xb8, 0xe1, 0x41, 0x96, - 0xf6, 0x37, 0x9c, 0x7a, 0xe3, 0x77, 0xda, 0xdf, 0xae, 0xc5, 0xc7, 0x15, 0x32, 0xcf, 0x0e, 0x9e, - 0x80, 0x54, 0x4c, 0x69, 0xe0, 0xfa, 0xad, 0x08, 0xe2, 0x10, 0xae, 0x31, 0xce, 0x75, 0x6f, 0x73, - 0x0f, 0xad, 0x87, 0x22, 0xe6, 0xfa, 0x38, 0xd2, 0x4c, 0x70, 0x65, 0xb5, 0x07, 0xad, 0xe1, 0xda, - 0xb8, 0x9b, 0xa5, 0xfd, 0xf5, 0xa3, 0x5a, 0xdd, 0xb9, 0xa6, 0x32, 0x0f, 0xd1, 0x26, 0x0d, 0x02, - 0xf1, 0xb9, 0x3c, 0xe0, 0xe0, 0x4b, 0x44, 0x79, 0x3e, 0x25, 0xeb, 0xbf, 0x81, 0x31, 0x5c, 0x1d, - 0x5b, 0x59, 0xda, 0xdf, 0x7c, 0xb1, 0xa4, 0xef, 0x2c, 0xa5, 0x7a, 0xcf, 0xd0, 0xff, 0x37, 0x66, - 0x64, 0x76, 0x51, 0xcb, 0x87, 0x59, 0x11, 0x80, 0x35, 0x27, 0x5f, 0x9a, 0x9b, 0xa8, 0x9d, 0xd0, - 0x20, 0x86, 0xf2, 0xbf, 0x9c, 0x72, 0xf3, 0xb4, 0xb9, 0x6f, 0x6c, 0x7d, 0x37, 0x50, 0xb7, 0x3e, - 0xf0, 0x43, 0xa6, 0xb4, 0xf9, 0x7e, 0x21, 0x46, 0xf8, 0x6e, 0x31, 0xca, 0xe9, 0x22, 0x44, 0xdd, - 0xf9, 0x37, 0xad, 0xfe, 0xad, 0xd4, 0x22, 0xf4, 0x0a, 0xb5, 0x99, 0x86, 0x50, 0x59, 0xcd, 0x22, - 0x06, 0x0f, 0xee, 0x10, 0x83, 0xf1, 0xc6, 0xdc, 0xaf, 0xfd, 0x3a, 0x27, 0x9d, 0xd2, 0x60, 0x3c, - 0x3c, 0xbb, 0xb4, 0x1b, 0xe7, 0x97, 0x76, 0xe3, 0xe2, 0xd2, 0x6e, 0x7c, 0xcd, 0x6c, 0xe3, 0x2c, - 0xb3, 0x8d, 0xf3, 0xcc, 0x36, 0x2e, 0x32, 0xdb, 0xf8, 0x99, 0xd9, 0xc6, 0xb7, 0x5f, 0x76, 0xe3, - 0x5d, 0x33, 0x19, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x57, 0xe7, 0x15, 0xb0, 0x04, 0x00, - 0x00, + // 623 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x4f, 0x6f, 0xd3, 0x3c, + 0x18, 0x6f, 0xda, 0xb7, 0x2f, 0x9b, 0xbb, 0x89, 0x2e, 0x0c, 0x29, 0xea, 0x21, 0xa9, 0xc6, 0xa5, + 0x9a, 0x84, 0xb3, 0x6e, 0x03, 0x4d, 0x48, 0x20, 0x11, 0x34, 0x09, 0xa4, 0x4d, 0xab, 0x82, 0x34, + 0x21, 0xc4, 0x01, 0x37, 0x7d, 0xc8, 0x4c, 0x13, 0x3b, 0xb2, 0x9d, 0x40, 0x6f, 0x7c, 0x04, 0xce, + 0x7c, 0x14, 0x3e, 0xc1, 0x8e, 0x3b, 0xee, 0x14, 0xb1, 0xf0, 0x2d, 0x76, 0x42, 0x49, 0xca, 0x9a, + 0xad, 0x9d, 0xd8, 0x2d, 0xfe, 0xfd, 0xb3, 0x9f, 0x27, 0x3f, 0xf4, 0x62, 0xbc, 0x27, 0x31, 0xe5, + 0xf6, 0x38, 0x1e, 0x82, 0x60, 0xa0, 0x40, 0xda, 0x09, 0xb0, 0x11, 0x17, 0xf6, 0x94, 0x20, 0x11, + 0xb5, 0xa5, 0xe2, 0x82, 0xf8, 0x60, 0x27, 0x7d, 0xdb, 0x07, 0x06, 0x82, 0x28, 0x18, 0xe1, 0x48, + 0x70, 0xc5, 0xf5, 0x87, 0xa5, 0x0c, 0x93, 0x88, 0xe2, 0xa9, 0x0c, 0x27, 0xfd, 0xce, 0x63, 0x9f, + 0xaa, 0x93, 0x78, 0x88, 0x3d, 0x1e, 0xda, 0x3e, 0xf7, 0xb9, 0x5d, 0xa8, 0x87, 0xf1, 0xa7, 0xe2, + 0x54, 0x1c, 0x8a, 0xaf, 0x32, 0xa5, 0xb3, 0xb9, 0xf0, 0xb2, 0x21, 0x28, 0x32, 0x77, 0x63, 0x67, + 0x77, 0xa6, 0x0d, 0x89, 0x77, 0x42, 0x19, 0x88, 0x89, 0x1d, 0x8d, 0xfd, 0x1c, 0x90, 0x76, 
0x08, + 0x8a, 0x2c, 0x78, 0x67, 0xc7, 0xbe, 0xcd, 0x25, 0x62, 0xa6, 0x68, 0x08, 0x73, 0x86, 0xa7, 0xff, + 0x32, 0x48, 0xef, 0x04, 0x42, 0x32, 0xe7, 0xdb, 0xb9, 0xcd, 0x17, 0x2b, 0x1a, 0xd8, 0x94, 0x29, + 0xa9, 0xc4, 0x4d, 0xd3, 0xc6, 0x8f, 0x26, 0x5a, 0x79, 0x5b, 0xce, 0xfd, 0x2a, 0x20, 0x52, 0xea, + 0x1f, 0xd1, 0x52, 0x3e, 0xc9, 0x88, 0x28, 0x62, 0x68, 0x5d, 0xad, 0xd7, 0xda, 0xde, 0xc2, 0xb3, + 0x4d, 0x5f, 0x05, 0xe3, 0x68, 0xec, 0xe7, 0x80, 0xc4, 0xb9, 0x1a, 0x27, 0x7d, 0x7c, 0x34, 0xfc, + 0x0c, 0x9e, 0x3a, 0x04, 0x45, 0x1c, 0xfd, 0x34, 0xb5, 0x6a, 0x59, 0x6a, 0xa1, 0x19, 0xe6, 0x5e, + 0xa5, 0xea, 0x4f, 0x50, 0x2b, 0x12, 0x3c, 0xa1, 0x92, 0x72, 0x06, 0xc2, 0xa8, 0x77, 0xb5, 0xde, + 0xb2, 0xf3, 0x60, 0x6a, 0x69, 0x0d, 0x66, 0x94, 0x5b, 0xd5, 0xe9, 0x3e, 0x42, 0x11, 0x11, 0x24, + 0x04, 0x05, 0x42, 0x1a, 0x8d, 0x6e, 0xa3, 0xd7, 0xda, 0xde, 0xc1, 0x0b, 0x4b, 0x80, 0xab, 0x13, + 0xe1, 0xc1, 0x95, 0x6b, 0x9f, 0x29, 0x31, 0x99, 0xbd, 0x6e, 0x46, 0xb8, 0x95, 0x68, 0x7d, 0x8c, + 0x56, 0x05, 0x78, 0x01, 0xa1, 0xe1, 0x80, 0x07, 0xd4, 0x9b, 0x18, 0xff, 0x15, 0x2f, 0xdc, 0xcf, + 0x52, 0x6b, 0xd5, 0xad, 0x12, 0x97, 0xa9, 0xb5, 0x55, 0xa9, 0x8f, 0xc7, 0x45, 0xde, 0x1d, 0x3c, + 0x00, 0x21, 0xa9, 0x54, 0xc0, 0xd4, 0x31, 0x0f, 0xe2, 0x10, 0xae, 0x79, 0xdc, 0xeb, 0xd9, 0xfa, + 0x2e, 0x5a, 0x09, 0x79, 0xcc, 0xd4, 0x51, 0xa4, 0x28, 0x67, 0xd2, 0x68, 0x76, 0x1b, 0xbd, 0x65, + 0xa7, 0x9d, 0xa5, 0xd6, 0xca, 0x61, 0x05, 0x77, 0xaf, 0xa9, 0xf4, 0x03, 0xb4, 0x4e, 0x82, 0x80, + 0x7f, 0x29, 0x2f, 0xd8, 0xff, 0x1a, 0x11, 0x96, 0x6f, 0xc9, 0xf8, 0xbf, 0xab, 0xf5, 0x96, 0x1c, + 0x23, 0x4b, 0xad, 0xf5, 0x97, 0x0b, 0x78, 0x77, 0xa1, 0x4b, 0x7f, 0x87, 0xd6, 0x92, 0x02, 0x72, + 0x28, 0x1b, 0x51, 0xe6, 0x1f, 0xf2, 0x11, 0x18, 0xf7, 0x8a, 0xa1, 0x37, 0xb3, 0xd4, 0x5a, 0x3b, + 0xbe, 0x49, 0x5e, 0x2e, 0x02, 0xdd, 0xf9, 0x90, 0xce, 0x73, 0x74, 0xff, 0xc6, 0xf6, 0xf5, 0x36, + 0x6a, 0x8c, 0x61, 0x52, 0x54, 0x6b, 0xd9, 0xcd, 0x3f, 0xf5, 0x75, 0xd4, 0x4c, 0x48, 0x10, 0x43, + 0xd9, 0x04, 0xb7, 0x3c, 0x3c, 0xab, 0xef, 0x69, 0x1b, 0x3f, 0x35, 0xd4, 0xae, 0xfe, 0xca, 0x03, + 0x2a, 0x95, 0xfe, 0x61, 0xae, 0xa0, 0xf8, 0x6e, 0x05, 0xcd, 0xdd, 0x45, 0x3d, 0xdb, 0xd3, 0x02, + 0x2c, 0xfd, 0x45, 0x2a, 0xe5, 0x7c, 0x8d, 0x9a, 0x54, 0x41, 0x28, 0x8d, 0x7a, 0x51, 0xb0, 0x47, + 0x77, 0x28, 0x98, 0xb3, 0x3a, 0xcd, 0x6b, 0xbe, 0xc9, 0x9d, 0x6e, 0x19, 0xe0, 0xf4, 0x4e, 0x2f, + 0xcc, 0xda, 0xd9, 0x85, 0x59, 0x3b, 0xbf, 0x30, 0x6b, 0xdf, 0x32, 0x53, 0x3b, 0xcd, 0x4c, 0xed, + 0x2c, 0x33, 0xb5, 0xf3, 0xcc, 0xd4, 0x7e, 0x65, 0xa6, 0xf6, 0xfd, 0xb7, 0x59, 0x7b, 0x5f, 0x4f, + 0xfa, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x56, 0xcc, 0xfd, 0x0a, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 7556e090..939ebde6 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -63,6 +63,13 @@ message StorageClass { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. 
+ // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; } // StorageClassList is a collection of storage classes. @@ -75,3 +82,4 @@ message StorageClassList { // Items is the list of StorageClasses repeated StorageClass items = 2; } + diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 59c4e59b..c058add8 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 9afdafb6..288d40ab 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -59,6 +59,13 @@ type StorageClass struct { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -74,3 +81,18 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index b4be857d..3eb9bdab 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_StorageClass = map[string]string{ "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. 
When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 50c707ab..a2b2f8e7 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -22,32 +22,9 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go new file mode 100644 index 00000000..aa94aff7 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true +package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go new file mode 100644 index 00000000..3d55c6ec --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -0,0 +1,1523 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto + + It has these top-level messages: + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") +} 
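For orientation only (not part of the patch): a minimal sketch of how the v1alpha1 types registered above might be exercised once this code is vendored. The import paths follow the vendored layout shown here; the driver, node, and PV names are purely illustrative assumptions.

package main

import (
	"fmt"

	storagev1alpha1 "k8s.io/api/storage/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Hypothetical names; a real external attacher would supply its own values.
	pvName := "pv-0001"
	va := storagev1alpha1.VolumeAttachment{
		ObjectMeta: metav1.ObjectMeta{Name: "example-attachment"},
		Spec: storagev1alpha1.VolumeAttachmentSpec{
			Attacher: "csi/example-driver",
			NodeName: "node-1",
			Source: storagev1alpha1.VolumeAttachmentSource{
				PersistentVolumeName: &pvName,
			},
		},
	}

	// Round-trip the object through the generated protobuf codecs defined below.
	raw, err := va.Marshal()
	if err != nil {
		panic(err)
	}
	var decoded storagev1alpha1.VolumeAttachment
	if err := decoded.Unmarshal(raw); err != nil {
		panic(err)
	}
	fmt.Println(decoded.Spec.Attacher, decoded.Spec.NodeName, *decoded.Spec.Source.PersistentVolumeName)
}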
+func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n5, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } 
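+	// The collected map keys are sorted before the entries are written, so
+	// repeated marshals of the same object emit identical bytes (deterministic
+	// encoding of the attachmentMetadata map).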
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n6, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n7, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n8, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k 
+ _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", 
"VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x3d, 0x6f, 0xdb, 0x46, + 0x18, 0xc7, 0x45, 0x49, 0xb6, 0xe5, 0x53, 0x5f, 0x8c, 0x83, 0xd0, 0x0a, 0x2a, 0x40, 0x19, 0x9a, + 0xdc, 0xa2, 0x3e, 0x56, 0x76, 0x51, 0x18, 0xdd, 0x44, 0xd8, 0x43, 0x51, 0xcb, 0x2d, 0xe8, 0xa2, + 0x43, 0xdb, 0xa1, 0x27, 0xf2, 0x31, 0x45, 0x4b, 0x7c, 0xc1, 0xdd, 0x51, 0x88, 0xb7, 0x4c, 0x99, + 0xb3, 0xe5, 0x1b, 0xe4, 0xb3, 0x68, 0x8b, 0x47, 0x4f, 0x42, 0xcc, 0x7c, 0x8b, 0x2c, 0x09, 0x78, + 0x3c, 0x89, 0xb2, 0x29, 0x25, 0xb6, 0x37, 0x3e, 0xcf, 0x3d, 0xff, 0xdf, 0xf3, 0x76, 0x47, 0x74, + 0x3c, 0x3a, 0xe2, 0xc4, 0x0b, 0x8d, 0x51, 0x3c, 0x00, 0x16, 0x80, 0x00, 0x6e, 0x4c, 0x20, 0x70, + 0x42, 0x66, 0xa8, 0x03, 0x1a, 0x79, 0x06, 0x17, 0x21, 0xa3, 0x2e, 0x18, 0x93, 0x2e, 0x1d, 0x47, + 0x43, 0xda, 0x35, 0x5c, 0x08, 0x80, 0x51, 0x01, 0x0e, 0x89, 0x58, 0x28, 0x42, 0xfc, 0x5d, 0x16, + 0x4c, 0x68, 0xe4, 0x11, 0x15, 0x4c, 0xe6, 0xc1, 0xad, 0x7d, 0xd7, 0x13, 0xc3, 0x78, 0x40, 0xec, + 0xd0, 0x37, 0xdc, 0xd0, 0x0d, 0x0d, 0xa9, 0x19, 0xc4, 0x17, 0xd2, 0x92, 
0x86, 0xfc, 0xca, 0x58, + 0xad, 0x7e, 0x9e, 0x18, 0x9e, 0x09, 0x08, 0xb8, 0x17, 0x06, 0x7c, 0x9f, 0x46, 0x1e, 0x07, 0x36, + 0x01, 0x66, 0x44, 0x23, 0x37, 0x3d, 0xe3, 0x77, 0x03, 0x8c, 0x49, 0x77, 0x00, 0xa2, 0x58, 0x5a, + 0xeb, 0xe7, 0x1c, 0xe7, 0x53, 0x7b, 0xe8, 0x05, 0xc0, 0xae, 0x72, 0x86, 0x0f, 0x82, 0x1a, 0x93, + 0xa2, 0xca, 0x58, 0xa7, 0x62, 0x71, 0x20, 0x3c, 0x1f, 0x0a, 0x82, 0x5f, 0x3e, 0x27, 0xe0, 0xf6, + 0x10, 0x7c, 0x5a, 0xd0, 0x1d, 0xae, 0xd3, 0xc5, 0xc2, 0x1b, 0x1b, 0x5e, 0x20, 0xb8, 0x60, 0xf7, + 0x45, 0x9d, 0xd7, 0x65, 0xb4, 0xf3, 0x77, 0x38, 0x8e, 0x7d, 0xe8, 0x09, 0x41, 0xed, 0xa1, 0x0f, + 0x81, 0xc0, 0xff, 0xa3, 0x5a, 0xda, 0x8d, 0x43, 0x05, 0x6d, 0x6a, 0xbb, 0xda, 0x5e, 0xfd, 0xe0, + 0x27, 0x92, 0xaf, 0x65, 0x01, 0x27, 0xd1, 0xc8, 0x4d, 0x1d, 0x9c, 0xa4, 0xd1, 0x64, 0xd2, 0x25, + 0x7f, 0x0c, 0x2e, 0xc1, 0x16, 0x7d, 0x10, 0xd4, 0xc4, 0xd3, 0x59, 0xbb, 0x94, 0xcc, 0xda, 0x28, + 0xf7, 0x59, 0x0b, 0x2a, 0x3e, 0x47, 0x55, 0x1e, 0x81, 0xdd, 0x2c, 0x4b, 0x7a, 0x97, 0x7c, 0x62, + 0xe9, 0xe4, 0x7e, 0x79, 0xe7, 0x11, 0xd8, 0xe6, 0x17, 0x0a, 0x5f, 0x4d, 0x2d, 0x4b, 0xc2, 0xf0, + 0xbf, 0x68, 0x93, 0x0b, 0x2a, 0x62, 0xde, 0xac, 0x48, 0xec, 0xe1, 0xe3, 0xb0, 0x52, 0x6a, 0x7e, + 0xa5, 0xc0, 0x9b, 0x99, 0x6d, 0x29, 0x64, 0x67, 0xaa, 0xa1, 0xc6, 0x7d, 0xc9, 0xa9, 0xc7, 0x05, + 0xfe, 0xaf, 0x30, 0x2c, 0xf2, 0xb0, 0x61, 0xa5, 0x6a, 0x39, 0xaa, 0x1d, 0x95, 0xb2, 0x36, 0xf7, + 0x2c, 0x0d, 0xca, 0x42, 0x1b, 0x9e, 0x00, 0x9f, 0x37, 0xcb, 0xbb, 0x95, 0xbd, 0xfa, 0xc1, 0xfe, + 0xa3, 0x5a, 0x32, 0xbf, 0x54, 0xe4, 0x8d, 0xdf, 0x52, 0x86, 0x95, 0xa1, 0x3a, 0x17, 0xe8, 0x9b, + 0x42, 0xf3, 0x61, 0xcc, 0x6c, 0xc0, 0xa7, 0xa8, 0x11, 0x01, 0xe3, 0x1e, 0x17, 0x10, 0x88, 0x2c, + 0xe6, 0x8c, 0xfa, 0x20, 0xfb, 0xda, 0x36, 0x9b, 0xc9, 0xac, 0xdd, 0xf8, 0x73, 0xc5, 0xb9, 0xb5, + 0x52, 0xd5, 0x79, 0xb3, 0x62, 0x64, 0xe9, 0xba, 0xf0, 0x8f, 0xa8, 0x46, 0xa5, 0x07, 0x98, 0x42, + 0x2f, 0x46, 0xd0, 0x53, 0x7e, 0x6b, 0x11, 0x21, 0xd7, 0x2a, 0xcb, 0x53, 0xb7, 0xe5, 0x91, 0x6b, + 0x95, 0xd2, 0xa5, 0xb5, 0x4a, 0xdb, 0x52, 0xc8, 0xb4, 0x94, 0x20, 0x74, 0xb2, 0x2e, 0x2b, 0x77, + 0x4b, 0x39, 0x53, 0x7e, 0x6b, 0x11, 0xd1, 0xf9, 0x50, 0x59, 0x31, 0x3a, 0x79, 0x3f, 0x96, 0x7a, + 0x72, 0x64, 0x4f, 0xb5, 0x42, 0x4f, 0xce, 0xa2, 0x27, 0x07, 0xbf, 0xd2, 0x10, 0xa6, 0x0b, 0x44, + 0x7f, 0x7e, 0x7f, 0xb2, 0x25, 0xff, 0xfe, 0x84, 0x7b, 0x4b, 0x7a, 0x05, 0xda, 0x49, 0x20, 0xd8, + 0x95, 0xd9, 0x52, 0x55, 0xe0, 0x62, 0x80, 0xb5, 0xa2, 0x04, 0x7c, 0x89, 0xea, 0x99, 0xf7, 0x84, + 0xb1, 0x90, 0xa9, 0x97, 0xb4, 0xf7, 0x80, 0x8a, 0x64, 0xbc, 0xa9, 0x27, 0xb3, 0x76, 0xbd, 0x97, + 0x03, 0xde, 0xcf, 0xda, 0xf5, 0xa5, 0x73, 0x6b, 0x19, 0x9e, 0xe6, 0x72, 0x20, 0xcf, 0x55, 0x7d, + 0x4a, 0xae, 0x63, 0x58, 0x9f, 0x6b, 0x09, 0xde, 0x3a, 0x41, 0xdf, 0xae, 0x19, 0x11, 0xde, 0x41, + 0x95, 0x11, 0x5c, 0x65, 0x37, 0xd1, 0x4a, 0x3f, 0x71, 0x03, 0x6d, 0x4c, 0xe8, 0x38, 0xce, 0x6e, + 0xdc, 0xb6, 0x95, 0x19, 0xbf, 0x96, 0x8f, 0xb4, 0xce, 0x0b, 0x0d, 0x2d, 0xe7, 0xc0, 0xa7, 0xa8, + 0x9a, 0xfe, 0x93, 0xd5, 0xcb, 0xff, 0xe1, 0x61, 0x2f, 0xff, 0x2f, 0xcf, 0x87, 0xfc, 0x0f, 0x96, + 0x5a, 0x96, 0xa4, 0xe0, 0xef, 0xd1, 0x96, 0x0f, 0x9c, 0x53, 0x57, 0x65, 0x36, 0xbf, 0x56, 0x41, + 0x5b, 0xfd, 0xcc, 0x6d, 0xcd, 0xcf, 0x4d, 0x32, 0xbd, 0xd5, 0x4b, 0xd7, 0xb7, 0x7a, 0xe9, 0xe6, + 0x56, 0x2f, 0x3d, 0x4f, 0x74, 0x6d, 0x9a, 0xe8, 0xda, 0x75, 0xa2, 0x6b, 0x37, 0x89, 0xae, 0xbd, + 0x4d, 0x74, 0xed, 0xe5, 0x3b, 0xbd, 0xf4, 0x4f, 0x6d, 0x3e, 0xb8, 0x8f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x68, 0x82, 0x7b, 0x73, 0x9e, 0x07, 0x00, 0x00, +} diff --git 
a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto new file mode 100644 index 00000000..c9421cf0 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1alpha1; + +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. 
+message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go new file mode 100644 index 00000000..7b81ee49 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VolumeAttachment{}, + &VolumeAttachmentList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go new file mode 100644 index 00000000..964bb5f7 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. 
+type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..faca8e93 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. 
+// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. 
the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..19ae9485 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,188 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index 6137d7a4..8957a4cf 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index b31d6f12..f2c8ea96 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -146,6 +146,12 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } return i, nil } @@ -242,6 +248,10 @@ func (m *StorageClass) Size() (n int) { if m.AllowVolumeExpansion != nil { n += 2 } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -293,6 +303,7 @@ func (this *StorageClass) String() string { `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, `}`, }, "") return s @@ -600,6 +611,36 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -842,42 +883,44 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x4f, 0xd4, 0x40, - 0x18, 0xc6, 0xb7, 0x2c, 0xab, 0x30, 0x0b, 0x71, 0x53, 0x39, 0x34, 0x7b, 0xe8, 0x6e, 0x38, 0xf5, - 0xc2, 0x0c, 0x20, 0x1a, 0x62, 0xe2, 0xc1, 0x12, 0x0e, 0x26, 0x10, 0x36, 0x35, 0xf1, 0x60, 0x3c, - 0x38, 0x5b, 0x5e, 0xcb, 0xd8, 0x76, 0xa6, 0x99, 0x99, 0xae, 0xee, 0xcd, 0x8f, 0xe0, 0x37, 0xf2, - 0x64, 0xc2, 0x91, 0x23, 0xa7, 0x46, 0xea, 0xb7, 0xf0, 0x64, 0xfa, 0x47, 0x5a, 0x58, 0x88, 0xdc, - 0x3a, 0xef, 0xfb, 0xfc, 0x9e, 0xb7, 0xf3, 0xce, 0x83, 0x0e, 0xc2, 0x7d, 0x85, 0x99, 0x20, 0x61, - 0x3a, 0x05, 0xc9, 0x41, 0x83, 0x22, 0x33, 0xe0, 0xa7, 0x42, 0x92, 0xba, 0x41, 0x13, 0x46, 0x94, - 0x16, 0x92, 0x06, 0x40, 0x66, 0x3b, 0x53, 0xd0, 0x74, 0x87, 0x04, 0xc0, 0x41, 0x52, 0x0d, 0xa7, - 0x38, 0x91, 0x42, 0x0b, 0x73, 0x58, 0x69, 0x31, 0x4d, 0x18, 0xae, 0xb5, 0xb8, 0xd6, 0x0e, 0xb7, - 0x02, 0xa6, 0xcf, 0xd2, 0x29, 0xf6, 0x45, 0x4c, 0x02, 0x11, 0x08, 0x52, 0x22, 0xd3, 0xf4, 0x53, - 0x79, 0x2a, 0x0f, 0xe5, 0x57, 0x65, 0x35, 0xdc, 0x6c, 0x8d, 0xf5, 0x85, 0x2c, 0x66, 0xde, 0x1e, - 0x37, 0xdc, 0x6b, 0x34, 0x31, 0xf5, 0xcf, 0x18, 0x07, 0x39, 0x27, 0x49, 0x18, 0x14, 0x05, 0x45, - 0x62, 0xd0, 0xf4, 0x2e, 0x8a, 0xdc, 0x47, 0xc9, 0x94, 
0x6b, 0x16, 0xc3, 0x02, 0xf0, 0xe2, 0x7f, - 0x80, 0xf2, 0xcf, 0x20, 0xa6, 0x0b, 0xdc, 0xb3, 0xfb, 0xb8, 0x54, 0xb3, 0x88, 0x30, 0xae, 0x95, - 0x96, 0xb7, 0xa1, 0xcd, 0x9f, 0xcb, 0x68, 0xed, 0x6d, 0xb5, 0xba, 0x83, 0x88, 0x2a, 0x65, 0x7e, - 0x44, 0x2b, 0xc5, 0x4d, 0x4e, 0xa9, 0xa6, 0x96, 0x31, 0x36, 0x9c, 0xfe, 0xee, 0x36, 0x6e, 0xd6, - 0x7c, 0x6d, 0x8c, 0x93, 0x30, 0x28, 0x0a, 0x0a, 0x17, 0x6a, 0x3c, 0xdb, 0xc1, 0x27, 0xd3, 0xcf, - 0xe0, 0xeb, 0x63, 0xd0, 0xd4, 0x35, 0xcf, 0xb3, 0x51, 0x27, 0xcf, 0x46, 0xa8, 0xa9, 0x79, 0xd7, - 0xae, 0xe6, 0x73, 0xd4, 0x4f, 0xa4, 0x98, 0x31, 0xc5, 0x04, 0x07, 0x69, 0x2d, 0x8d, 0x0d, 0x67, - 0xd5, 0x7d, 0x5a, 0x23, 0xfd, 0x49, 0xd3, 0xf2, 0xda, 0x3a, 0x33, 0x42, 0x28, 0xa1, 0x92, 0xc6, - 0xa0, 0x41, 0x2a, 0xab, 0x3b, 0xee, 0x3a, 0xfd, 0xdd, 0x7d, 0x7c, 0x7f, 0x02, 0x70, 0xfb, 0x5a, - 0x78, 0x72, 0x8d, 0x1e, 0x72, 0x2d, 0xe7, 0xcd, 0x2f, 0x36, 0x0d, 0xaf, 0xe5, 0x6f, 0x86, 0x68, - 0x5d, 0x82, 0x1f, 0x51, 0x16, 0x4f, 0x44, 0xc4, 0xfc, 0xb9, 0xb5, 0x5c, 0xfe, 0xe6, 0x61, 0x9e, - 0x8d, 0xd6, 0xbd, 0x76, 0xe3, 0x4f, 0x36, 0xda, 0x5e, 0xcc, 0x0e, 0x9e, 0x80, 0x54, 0x4c, 0x69, - 0xe0, 0xfa, 0x9d, 0x88, 0xd2, 0x18, 0x6e, 0x30, 0xde, 0x4d, 0x6f, 0x73, 0x0f, 0xad, 0xc5, 0x22, - 0xe5, 0xfa, 0x24, 0xd1, 0x4c, 0x70, 0x65, 0xf5, 0xc6, 0x5d, 0x67, 0xd5, 0x1d, 0xe4, 0xd9, 0x68, - 0xed, 0xb8, 0x55, 0xf7, 0x6e, 0xa8, 0xcc, 0x23, 0xb4, 0x41, 0xa3, 0x48, 0x7c, 0xa9, 0x06, 0x1c, - 0x7e, 0x4d, 0x28, 0x2f, 0x56, 0x65, 0x3d, 0x1a, 0x1b, 0xce, 0x8a, 0x6b, 0xe5, 0xd9, 0x68, 0xe3, - 0xf5, 0x1d, 0x7d, 0xef, 0x4e, 0x6a, 0xf8, 0x0a, 0x3d, 0xb9, 0xb5, 0x23, 0x73, 0x80, 0xba, 0x21, - 0xcc, 0xcb, 0x14, 0xac, 0x7a, 0xc5, 0xa7, 0xb9, 0x81, 0x7a, 0x33, 0x1a, 0xa5, 0x50, 0x3d, 0x9a, - 0x57, 0x1d, 0x5e, 0x2e, 0xed, 0x1b, 0x9b, 0x3f, 0x0c, 0x34, 0x68, 0x2f, 0xfc, 0x88, 0x29, 0x6d, - 0x7e, 0x58, 0xc8, 0x12, 0x7e, 0x58, 0x96, 0x0a, 0xba, 0x4c, 0xd2, 0xa0, 0x7e, 0xa6, 0x95, 0x7f, - 0x95, 0x56, 0x8e, 0x8e, 0x51, 0x8f, 0x69, 0x88, 0x95, 0xb5, 0x54, 0x66, 0xc1, 0x79, 0x68, 0x16, - 0xdc, 0xf5, 0xda, 0xb4, 0xf7, 0xa6, 0xc0, 0xbd, 0xca, 0xc5, 0xdd, 0x3a, 0xbf, 0xb2, 0x3b, 0x17, - 0x57, 0x76, 0xe7, 0xf2, 0xca, 0xee, 0x7c, 0xcb, 0x6d, 0xe3, 0x3c, 0xb7, 0x8d, 0x8b, 0xdc, 0x36, - 0x2e, 0x73, 0xdb, 0xf8, 0x95, 0xdb, 0xc6, 0xf7, 0xdf, 0x76, 0xe7, 0xfd, 0xe3, 0xda, 0xf1, 0x6f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xae, 0x44, 0x72, 0xc1, 0x04, 0x00, 0x00, + // 622 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd3, 0x40, + 0x14, 0x85, 0xe3, 0x86, 0xd0, 0x76, 0xd2, 0x8a, 0xd4, 0x74, 0x61, 0x65, 0x61, 0x47, 0x5d, 0x45, + 0x48, 0x1d, 0xb7, 0xa5, 0xa0, 0x0a, 0x89, 0x05, 0xae, 0xba, 0x40, 0x6a, 0xd4, 0xc8, 0x48, 0x15, + 0x42, 0x2c, 0x98, 0x38, 0x17, 0x77, 0x88, 0x3d, 0x63, 0xcd, 0x8c, 0x03, 0xd9, 0xf1, 0x08, 0xbc, + 0x01, 0x8f, 0xc2, 0xb6, 0xcb, 0x2e, 0xbb, 0xb2, 0xa8, 0x79, 0x8b, 0xae, 0x90, 0x7f, 0x68, 0xdc, + 0xfc, 0x88, 0xee, 0x3c, 0xf7, 0x9e, 0xef, 0xdc, 0x99, 0xeb, 0x83, 0x8e, 0x47, 0x47, 0x12, 0x53, + 0x6e, 0x8f, 0xe2, 0x01, 0x08, 0x06, 0x0a, 0xa4, 0x3d, 0x06, 0x36, 0xe4, 0xc2, 0x2e, 0x1b, 0x24, + 0xa2, 0xb6, 0x54, 0x5c, 0x10, 0x1f, 0xec, 0xf1, 0xfe, 0x00, 0x14, 0xd9, 0xb7, 0x7d, 0x60, 0x20, + 0x88, 0x82, 0x21, 0x8e, 0x04, 0x57, 0x5c, 0x6f, 0x17, 0x5a, 0x4c, 0x22, 0x8a, 0x4b, 0x2d, 0x2e, + 0xb5, 0xed, 0x5d, 0x9f, 0xaa, 0x8b, 0x78, 0x80, 0x3d, 0x1e, 0xda, 0x3e, 0xf7, 0xb9, 0x9d, 0x23, + 0x83, 0xf8, 0x73, 0x7e, 0xca, 0x0f, 0xf9, 0x57, 0x61, 0xd5, 0xde, 0xa9, 0x8c, 0xf5, 0xb8, 0xc8, + 0x66, 0xce, 0x8e, 0x6b, 0x1f, 0x4e, 0x35, 0x21, 
0xf1, 0x2e, 0x28, 0x03, 0x31, 0xb1, 0xa3, 0x91, + 0x9f, 0x15, 0xa4, 0x1d, 0x82, 0x22, 0x8b, 0x28, 0x7b, 0x19, 0x25, 0x62, 0xa6, 0x68, 0x08, 0x73, + 0xc0, 0xcb, 0xff, 0x01, 0xd2, 0xbb, 0x80, 0x90, 0xcc, 0x71, 0xcf, 0x97, 0x71, 0xb1, 0xa2, 0x81, + 0x4d, 0x99, 0x92, 0x4a, 0xcc, 0x42, 0x3b, 0x3f, 0x1b, 0x68, 0xe3, 0x5d, 0xb1, 0xba, 0xe3, 0x80, + 0x48, 0xa9, 0x7f, 0x42, 0x6b, 0xd9, 0x4b, 0x86, 0x44, 0x11, 0x43, 0xeb, 0x68, 0xdd, 0xe6, 0xc1, + 0x1e, 0x9e, 0xae, 0xf9, 0xce, 0x18, 0x47, 0x23, 0x3f, 0x2b, 0x48, 0x9c, 0xa9, 0xf1, 0x78, 0x1f, + 0x9f, 0x0d, 0xbe, 0x80, 0xa7, 0x7a, 0xa0, 0x88, 0xa3, 0x5f, 0x26, 0x56, 0x2d, 0x4d, 0x2c, 0x34, + 0xad, 0xb9, 0x77, 0xae, 0xfa, 0x0b, 0xd4, 0x8c, 0x04, 0x1f, 0x53, 0x49, 0x39, 0x03, 0x61, 0xac, + 0x74, 0xb4, 0xee, 0xba, 0xf3, 0xb4, 0x44, 0x9a, 0xfd, 0x69, 0xcb, 0xad, 0xea, 0xf4, 0x00, 0xa1, + 0x88, 0x08, 0x12, 0x82, 0x02, 0x21, 0x8d, 0x7a, 0xa7, 0xde, 0x6d, 0x1e, 0x1c, 0xe1, 0xe5, 0x09, + 0xc0, 0xd5, 0x67, 0xe1, 0xfe, 0x1d, 0x7a, 0xc2, 0x94, 0x98, 0x4c, 0xaf, 0x38, 0x6d, 0xb8, 0x15, + 0x7f, 0x7d, 0x84, 0x36, 0x05, 0x78, 0x01, 0xa1, 0x61, 0x9f, 0x07, 0xd4, 0x9b, 0x18, 0x8f, 0xf2, + 0x6b, 0x9e, 0xa4, 0x89, 0xb5, 0xe9, 0x56, 0x1b, 0xb7, 0x89, 0xb5, 0x37, 0x9f, 0x1d, 0xdc, 0x07, + 0x21, 0xa9, 0x54, 0xc0, 0xd4, 0x39, 0x0f, 0xe2, 0x10, 0xee, 0x31, 0xee, 0x7d, 0x6f, 0xfd, 0x10, + 0x6d, 0x84, 0x3c, 0x66, 0xea, 0x2c, 0x52, 0x94, 0x33, 0x69, 0x34, 0x3a, 0xf5, 0xee, 0xba, 0xd3, + 0x4a, 0x13, 0x6b, 0xa3, 0x57, 0xa9, 0xbb, 0xf7, 0x54, 0xfa, 0x29, 0xda, 0x26, 0x41, 0xc0, 0xbf, + 0x16, 0x03, 0x4e, 0xbe, 0x45, 0x84, 0x65, 0xab, 0x32, 0x1e, 0x77, 0xb4, 0xee, 0x9a, 0x63, 0xa4, + 0x89, 0xb5, 0xfd, 0x66, 0x41, 0xdf, 0x5d, 0x48, 0xe9, 0xef, 0xd1, 0xd6, 0x38, 0x2f, 0x39, 0x94, + 0x0d, 0x29, 0xf3, 0x7b, 0x7c, 0x08, 0xc6, 0x6a, 0xfe, 0xe8, 0x67, 0x69, 0x62, 0x6d, 0x9d, 0xcf, + 0x36, 0x6f, 0x17, 0x15, 0xdd, 0x79, 0x93, 0xf6, 0x6b, 0xf4, 0x64, 0x66, 0xfb, 0x7a, 0x0b, 0xd5, + 0x47, 0x30, 0xc9, 0xf3, 0xb5, 0xee, 0x66, 0x9f, 0xfa, 0x36, 0x6a, 0x8c, 0x49, 0x10, 0x43, 0x11, + 0x07, 0xb7, 0x38, 0xbc, 0x5a, 0x39, 0xd2, 0x76, 0x7e, 0x69, 0xa8, 0x55, 0xfd, 0x95, 0xa7, 0x54, + 0x2a, 0xfd, 0xe3, 0x5c, 0x4a, 0xf1, 0xc3, 0x52, 0x9a, 0xd1, 0x79, 0x46, 0x5b, 0x65, 0x00, 0xd6, + 0xfe, 0x55, 0x2a, 0x09, 0xed, 0xa1, 0x06, 0x55, 0x10, 0x4a, 0x63, 0x25, 0x4f, 0x59, 0xf7, 0xa1, + 0x29, 0x73, 0x36, 0x4b, 0xd3, 0xc6, 0xdb, 0x0c, 0x77, 0x0b, 0x17, 0x67, 0xf7, 0xf2, 0xc6, 0xac, + 0x5d, 0xdd, 0x98, 0xb5, 0xeb, 0x1b, 0xb3, 0xf6, 0x3d, 0x35, 0xb5, 0xcb, 0xd4, 0xd4, 0xae, 0x52, + 0x53, 0xbb, 0x4e, 0x4d, 0xed, 0x77, 0x6a, 0x6a, 0x3f, 0xfe, 0x98, 0xb5, 0x0f, 0xab, 0xa5, 0xe3, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xe2, 0x8e, 0x84, 0x1b, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 24420f5e..b0e030c0 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1beta1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -63,6 +63,13 @@ message StorageClass { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. 
When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; } // StorageClassList is a collection of storage classes. @@ -75,3 +82,4 @@ message StorageClassList { // Items is the list of StorageClasses repeated StorageClass items = 2; } + diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index 759ba81a..7f1f0c8e 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index e5036b55..7fb9ad98 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -59,6 +59,13 @@ type StorageClass struct { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -74,3 +81,18 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index e2148c23..85886f7d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_StorageClass = map[string]string{ "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index bf1f91a6..9d1e7982 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -22,32 +22,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/README.md b/vendor/k8s.io/apiextensions-apiserver/README.md index c9c84bae..be75b9ba 100644 --- a/vendor/k8s.io/apiextensions-apiserver/README.md +++ b/vendor/k8s.io/apiextensions-apiserver/README.md @@ -6,7 +6,7 @@ It provides an API for registering `CustomResourceDefinitions`. ## Purpose -This API server provides the implementation for `CustomResourceDefinitions` which is included as +This API server provides the implementation for `CustomResourceDefinitions` which is included as delegate server inside of `kube-apiserver`. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index fa78ce6a..80b37bf7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -29,6 +29,7 @@ const ( // owner: @sttts, @nikhita // alpha: v1.8 + // beta: v1.9 // // CustomResourceValidation is a list of validation methods for CustomResources CustomResourceValidation utilfeature.Feature = "CustomResourceValidation" @@ -42,5 +43,5 @@ func init() { // To add a new feature, define a key for it above and add it here. 
The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha}, + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index d5503fac..9960600b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -405,84 +405,84 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr // IsNotFound returns true if the specified error was created by NewNotFound. func IsNotFound(err error) bool { - return reasonForError(err) == metav1.StatusReasonNotFound + return ReasonForError(err) == metav1.StatusReasonNotFound } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. func IsAlreadyExists(err error) bool { - return reasonForError(err) == metav1.StatusReasonAlreadyExists + return ReasonForError(err) == metav1.StatusReasonAlreadyExists } // IsConflict determines if the err is an error which indicates the provided update conflicts. func IsConflict(err error) bool { - return reasonForError(err) == metav1.StatusReasonConflict + return ReasonForError(err) == metav1.StatusReasonConflict } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. func IsInvalid(err error) bool { - return reasonForError(err) == metav1.StatusReasonInvalid + return ReasonForError(err) == metav1.StatusReasonInvalid } // IsGone is true if the error indicates the requested resource is no longer available. func IsGone(err error) bool { - return reasonForError(err) == metav1.StatusReasonGone + return ReasonForError(err) == metav1.StatusReasonGone } // IsResourceExpired is true if the error indicates the resource has expired and the current action is // no longer possible. func IsResourceExpired(err error) bool { - return reasonForError(err) == metav1.StatusReasonExpired + return ReasonForError(err) == metav1.StatusReasonExpired } // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { - return reasonForError(err) == metav1.StatusReasonMethodNotAllowed + return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed } // IsServiceUnavailable is true if the error indicates the underlying service is no longer available. func IsServiceUnavailable(err error) bool { - return reasonForError(err) == metav1.StatusReasonServiceUnavailable + return ReasonForError(err) == metav1.StatusReasonServiceUnavailable } // IsBadRequest determines if err is an error which indicates that the request is invalid. func IsBadRequest(err error) bool { - return reasonForError(err) == metav1.StatusReasonBadRequest + return ReasonForError(err) == metav1.StatusReasonBadRequest } // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. func IsUnauthorized(err error) bool { - return reasonForError(err) == metav1.StatusReasonUnauthorized + return ReasonForError(err) == metav1.StatusReasonUnauthorized } // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. 
func IsForbidden(err error) bool { - return reasonForError(err) == metav1.StatusReasonForbidden + return ReasonForError(err) == metav1.StatusReasonForbidden } // IsTimeout determines if err is an error which indicates that request times out due to long // processing. func IsTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonTimeout + return ReasonForError(err) == metav1.StatusReasonTimeout } // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. func IsServerTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonServerTimeout + return ReasonForError(err) == metav1.StatusReasonServerTimeout } // IsInternalError determines if err is an error which indicates an internal server error. func IsInternalError(err error) bool { - return reasonForError(err) == metav1.StatusReasonInternalError + return ReasonForError(err) == metav1.StatusReasonInternalError } // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. func IsTooManyRequests(err error) bool { - if reasonForError(err) == metav1.StatusReasonTooManyRequests { + if ReasonForError(err) == metav1.StatusReasonTooManyRequests { return true } switch t := err.(type) { @@ -536,7 +536,8 @@ func SuggestsClientDelay(err error) (int, bool) { return 0, false } -func reasonForError(err error) metav1.StatusReason { +// ReasonForError returns the HTTP status for a particular error. +func ReasonForError(err error) metav1.StatusReason { switch t := err.(type) { case APIStatus: return t.Status().Reason diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index bf172369..5dc9d89e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -75,6 +75,9 @@ type MetadataAccessor interface { Annotations(obj runtime.Object) (map[string]string, error) SetAnnotations(obj runtime.Object, annotations map[string]string) error + Continue(obj runtime.Object) (string, error) + SetContinue(obj runtime.Object, c string) error + runtime.ResourceVersioner } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go new file mode 100644 index 00000000..7f92f39a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -0,0 +1,121 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// lazyObject defers loading the mapper and typer until necessary. 
+type lazyObject struct { + loader func() (RESTMapper, runtime.ObjectTyper, error) + + lock sync.Mutex + loaded bool + err error + mapper RESTMapper + typer runtime.ObjectTyper +} + +// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// returning those initialization errors when the interface methods are invoked. This defers the +// initialization and any server calls until a client actually needs to perform the action. +func NewLazyObjectLoader(fn func() (RESTMapper, runtime.ObjectTyper, error)) (RESTMapper, runtime.ObjectTyper) { + obj := &lazyObject{loader: fn} + return obj, obj +} + +// init lazily loads the mapper and typer, returning an error if initialization has failed. +func (o *lazyObject) init() error { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded { + return o.err + } + o.mapper, o.typer, o.err = o.loader() + o.loaded = true + return o.err +} + +var _ RESTMapper = &lazyObject{} +var _ runtime.ObjectTyper = &lazyObject{} + +func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return schema.GroupVersionKind{}, err + } + return o.mapper.KindFor(resource) +} + +func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionKind{}, err + } + return o.mapper.KindsFor(resource) +} + +func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return schema.GroupVersionResource{}, err + } + return o.mapper.ResourceFor(input) +} + +func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionResource{}, err + } + return o.mapper.ResourcesFor(input) +} + +func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMapping(gk, versions...) +} + +func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMappings(gk, versions...) 
+} + +func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { + if err := o.init(); err != nil { + return "", err + } + return o.mapper.ResourceSingularizer(resource) +} + +func (o *lazyObject) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + if err := o.init(); err != nil { + return nil, false, err + } + return o.typer.ObjectKinds(obj) +} + +func (o *lazyObject) Recognizes(gvk schema.GroupVersionKind) bool { + if err := o.init(); err != nil { + return false + } + return o.typer.Recognizes(gvk) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1889b951..c2d51b43 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -89,7 +89,7 @@ func ListAccessor(obj interface{}) (List, error) { } return nil, errNotList default: - panic(fmt.Errorf("%T does not implement the List interface", obj)) + return nil, errNotList } } @@ -367,6 +367,23 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e return nil } +func (resourceAccessor) Continue(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetContinue(), nil +} + +func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetContinue(version) + return nil +} + // extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go index 3ebf2481..4e13efea 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -21,8 +21,24 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) +// InterfacesForUnstructuredConversion returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects and supports conversion +// from typed objects (provided by parent) to untyped objects. +func InterfacesForUnstructuredConversion(parent VersionInterfacesFunc) VersionInterfacesFunc { + return func(version schema.GroupVersion) (*VersionInterfaces, error) { + if i, err := parent(version); err == nil { + return &VersionInterfaces{ + ObjectConvertor: i.ObjectConvertor, + MetadataAccessor: NewAccessor(), + }, nil + } + return InterfacesForUnstructured(version) + } +} + // InterfacesForUnstructured returns VersionInterfaces suitable for -// dealing with unstructured.Unstructured objects. +// dealing with unstructured.Unstructured objects. It will return errors for +// other conversions. func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { return &VersionInterfaces{ ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index 118dfca0..eb49f819 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. 
package resource -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Quantity).DeepCopyInto(out.(*Quantity)) - return nil - }, InType: reflect.TypeOf(&Quantity{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Quantity) DeepCopyInto(out *Quantity) { *out = in.DeepCopy() diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go index 5150db01..0da94f50 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. +// Package to keep track of API Versions that can be registered and are enabled in a Scheme. package registered import ( diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go index 213e34bc..baca784f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go @@ -38,7 +38,7 @@ type GroupMeta struct { // to go through the InterfacesFor method below. SelfLinker runtime.SelfLinker - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known + // RESTMapper provides the default mapping between REST paths and the objects declared in a Scheme and all known // versions. RESTMapper meta.RESTMapper diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index f13ff9fc..deaf5309 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package internalversion import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *List) DeepCopyInto(out *List) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index a96f38ee..c62f8533 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -38,6 +38,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_intstr_IntOrString_To_intstr_IntOrString, Convert_unversioned_Time_To_unversioned_Time, + Convert_unversioned_MicroTime_To_unversioned_MicroTime, Convert_Pointer_v1_Duration_To_v1_Duration, Convert_v1_Duration_To_Pointer_v1_Duration, @@ -199,6 +200,12 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s return nil } +func Convert_unversioned_MicroTime_To_unversioned_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + // Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 653b3023..b8218469 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -50,6 +50,7 @@ limitations under the License. MicroTime ObjectMeta OwnerReference + Patch Preconditions RootPaths ServerAddressByClientCIDR @@ -196,51 +197,55 @@ func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptorGenerated, []int{28} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -268,6 +273,7 @@ func init() { proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") @@ -1317,6 +1323,24 @@ func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2065,6 +2089,12 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *Patch) Size() (n int) { + var l int + _ = l + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -2464,6 +2494,15 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Patch{`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" @@ -6382,6 +6421,56 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Preconditions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7715,7 +7804,7 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2428 bytes of a gzipped FileDescriptorProto + // 2435 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, 0x15, 0x4e, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa9, 0xcd, 0x80, 0x37, 0x02, 0x3b, 0xdb, 0x8b, 0x56, 0x59, 0x98, 0xb5, 0x49, 0x16, 0x56, 0xc3, 0x00, 0x03, 0xe9, 0x38, 0x33, 0x8a, 0x76, 0x32, @@ -7723,7 +7812,7 @@ var fileDescriptorGenerated = []byte{ 0x33, 0x09, 0x1c, 0xd8, 0x03, 0x48, 0x1c, 0x10, 0x9a, 0x23, 0x27, 0xb4, 0x23, 0xb8, 0x70, 0xe5, 0xc4, 0x05, 0x4e, 0x48, 0xcc, 0x71, 0x24, 0x2e, 0x7b, 0x40, 0xd6, 0x8e, 0xf7, 0xc0, 0x09, 0x71, 0xcf, 0x09, 0x55, 0x75, 0xf5, 0x9f, 0x1d, 0x4f, 0xda, 0x3b, 0x0b, 0xe2, 0x14, 0xf7, 0xfb, 0xf9, - 0xde, 0xab, 0xaa, 0xf7, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, + 0xde, 0xab, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, 0xfe, 0x01, 0xa1, 0x2e, 0xe1, 0x84, 0x35, 0x4e, 0x88, 0xdb, 0xf6, 0x68, 0x43, 0x31, 0xcc, 0x9e, 0xdd, 0x35, 0xad, 0x23, 0xdb, 0x25, 0xf4, 0xac, 0xd1, 0x3b, 0xee, 0x08, 0x02, 0x6b, 0x74, 0x09, 0x37, 0x1b, 0x27, 0x1b, 0x8d, 0x0e, 0x71, 0x09, 0x35, 0x39, 0x69, 0xd7, 0x7b, 0xd4, 0xe3, 0x1e, @@ -7735,137 +7824,138 @@ var fileDescriptorGenerated = []byte{ 0xff, 0x96, 0x85, 0xc2, 0x56, 0x6b, 0xf7, 0x16, 0xf5, 0xfa, 0x3d, 0xb4, 0x06, 0xb3, 0xae, 0xd9, 0x25, 0x15, 0x6d, 0x4d, 0x5b, 0x2f, 0x1a, 0xf3, 0x4f, 0x07, 0xb5, 0x99, 0xe1, 0xa0, 0x36, 0x7b, 0xc7, 0xec, 0x12, 0x2c, 0x39, 0xc8, 0x81, 0xc2, 0x09, 0xa1, 0xcc, 0xf6, 0x5c, 0x56, 0xc9, 0xac, - 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x36, 0xad, 0x2e, 0x0d, 0xdc, 0xf7, 0x55, 0x6f, 0x7a, - 0xb4, 0x69, 0x33, 0xcb, 0x3b, 0x21, 0xf4, 0xcc, 0x58, 0x52, 0x56, 0x0a, 0x8a, 0xc9, 0x70, 0x68, - 0x01, 0xfd, 0x5c, 0x83, 0xa5, 0x1e, 0x25, 0x87, 0x84, 0x52, 0xd2, 0x56, 0xfc, 0x4a, 0x76, 0x4d, - 0xfb, 0x0c, 0xcc, 0x56, 0x94, 0xd9, 0xa5, 0xd6, 0x08, 0x3e, 0x1e, 0xb3, 0x88, 0x7e, 0xa7, 0xc1, - 0x2a, 0x23, 0xf4, 0x84, 0xd0, 0xad, 0x76, 0x9b, 0x12, 0xc6, 0x8c, 0xb3, 0x6d, 0xc7, 0x26, 0x2e, - 0xdf, 0xde, 0x6d, 0x62, 0x56, 0x99, 0x95, 0xfb, 0xf0, 0x9d, 0x74, 0x0e, 0xed, 0x4f, 0xc2, 0x31, - 0x74, 0xe5, 0xd1, 0xea, 0x44, 0x11, 0x86, 0x5f, 0xe0, 0x86, 0x7e, 0x08, 0xf3, 0xc1, 0x41, 0xde, - 0xb6, 0x19, 0x47, 0xf7, 0x21, 0xdf, 0x11, 0x1f, 0xac, 0xa2, 0x49, 0x07, 0xeb, 0xe9, 0x1c, 0x0c, - 0x30, 0x8c, 0x05, 0xe5, 0x4f, 0x5e, 0x7e, 0x32, 0xac, 0xd0, 0xf4, 0x3f, 0x67, 0xa1, 0xb4, 0xd5, - 0xda, 0xc5, 0x84, 0x79, 0x7d, 0x6a, 0x91, 0x14, 0x41, 0xb3, 0x09, 0x20, 0xfe, 0xb2, 0x9e, 0x69, - 0x91, 
0x76, 0x25, 0xb3, 0xa6, 0xad, 0x17, 0x0c, 0xa4, 0xe4, 0xe0, 0x4e, 0xc8, 0xc1, 0x31, 0x29, - 0x81, 0x7a, 0x6c, 0xbb, 0x6d, 0x79, 0xda, 0x31, 0xd4, 0x77, 0x6d, 0xb7, 0x8d, 0x25, 0x07, 0xdd, - 0x86, 0xdc, 0x09, 0xa1, 0x07, 0x62, 0xff, 0x45, 0x40, 0x7c, 0x25, 0xdd, 0xf2, 0xee, 0x0b, 0x15, - 0xa3, 0x38, 0x1c, 0xd4, 0x72, 0xf2, 0x27, 0xf6, 0x41, 0x50, 0x1d, 0x80, 0x1d, 0x79, 0x94, 0x4b, - 0x77, 0x2a, 0xb9, 0xb5, 0xec, 0x7a, 0xd1, 0x58, 0x10, 0xfe, 0xed, 0x87, 0x54, 0x1c, 0x93, 0x40, - 0xd7, 0x60, 0x9e, 0xd9, 0x6e, 0xa7, 0xef, 0x98, 0x54, 0x10, 0x2a, 0x79, 0xe9, 0xe7, 0x8a, 0xf2, - 0x73, 0x7e, 0x3f, 0xc6, 0xc3, 0x09, 0x49, 0x61, 0xc9, 0x32, 0x39, 0xe9, 0x78, 0xd4, 0x26, 0xac, - 0x32, 0x17, 0x59, 0xda, 0x0e, 0xa9, 0x38, 0x26, 0x81, 0x5e, 0x87, 0x9c, 0xdc, 0xf9, 0x4a, 0x41, - 0x9a, 0x28, 0x2b, 0x13, 0x39, 0x79, 0x2c, 0xd8, 0xe7, 0xa1, 0x37, 0x61, 0x4e, 0x65, 0x4d, 0xa5, - 0x28, 0xc5, 0x16, 0x95, 0xd8, 0x5c, 0x10, 0xd6, 0x01, 0x5f, 0xff, 0xa3, 0x06, 0x8b, 0xb1, 0xf3, - 0x93, 0xb1, 0x72, 0x0d, 0xe6, 0x3b, 0xb1, 0x4c, 0x51, 0x67, 0x19, 0xae, 0x26, 0x9e, 0x45, 0x38, - 0x21, 0x89, 0x08, 0x14, 0xa9, 0x42, 0x0a, 0x2a, 0xc2, 0x46, 0xea, 0x40, 0x0b, 0x7c, 0x88, 0x2c, - 0xc5, 0x88, 0x0c, 0x47, 0xc8, 0xfa, 0x3f, 0x35, 0x19, 0x74, 0x41, 0x8d, 0x40, 0xeb, 0xb1, 0x3a, - 0xa4, 0xc9, 0x2d, 0x9c, 0x9f, 0x50, 0x43, 0x2e, 0x49, 0xde, 0xcc, 0xff, 0x45, 0xf2, 0x5e, 0x2f, + 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x82, 0x56, 0x97, 0x06, 0xee, 0xfb, 0xaa, 0x37, 0x3d, + 0xda, 0xb4, 0x99, 0xe5, 0x9d, 0x10, 0x7a, 0x66, 0x2c, 0x29, 0x2b, 0x05, 0xc5, 0x64, 0x38, 0xb4, + 0x80, 0x7e, 0xae, 0xc1, 0x52, 0x8f, 0x92, 0x43, 0x42, 0x29, 0x69, 0x2b, 0x7e, 0x25, 0xbb, 0xa6, + 0x7d, 0x06, 0x66, 0x2b, 0xca, 0xec, 0x52, 0x6b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0xbf, 0xd3, 0x60, + 0x95, 0x11, 0x7a, 0x42, 0xe8, 0x56, 0xbb, 0x4d, 0x09, 0x63, 0xc6, 0xd9, 0xb6, 0x63, 0x13, 0x97, + 0x6f, 0xef, 0x36, 0x31, 0xab, 0xcc, 0xca, 0x38, 0x7c, 0x27, 0x9d, 0x43, 0xfb, 0x93, 0x70, 0x0c, + 0x5d, 0x79, 0xb4, 0x3a, 0x51, 0x84, 0xe1, 0x17, 0xb8, 0xa1, 0x1f, 0xc2, 0x7c, 0xb0, 0x91, 0xb7, + 0x6d, 0xc6, 0xd1, 0x7d, 0xc8, 0x77, 0xc4, 0x07, 0xab, 0x68, 0xd2, 0xc1, 0x7a, 0x3a, 0x07, 0x03, + 0x0c, 0x63, 0x41, 0xf9, 0x93, 0x97, 0x9f, 0x0c, 0x2b, 0x34, 0xfd, 0xcf, 0x59, 0x28, 0x6d, 0xb5, + 0x76, 0x31, 0x61, 0x5e, 0x9f, 0x5a, 0x24, 0x45, 0xd2, 0x6c, 0x02, 0x88, 0xbf, 0xac, 0x67, 0x5a, + 0xa4, 0x5d, 0xc9, 0xac, 0x69, 0xeb, 0x05, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x72, 0x70, 0x4c, 0x4a, + 0xa0, 0x1e, 0xdb, 0x6e, 0x5b, 0xee, 0x76, 0x0c, 0xf5, 0x5d, 0xdb, 0x6d, 0x63, 0xc9, 0x41, 0xb7, + 0x21, 0x77, 0x42, 0xe8, 0x81, 0x88, 0xbf, 0x48, 0x88, 0xaf, 0xa4, 0x5b, 0xde, 0x7d, 0xa1, 0x62, + 0x14, 0x87, 0x83, 0x5a, 0x4e, 0xfe, 0xc4, 0x3e, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x8f, 0x72, 0xe9, + 0x4e, 0x25, 0xb7, 0x96, 0x5d, 0x2f, 0x1a, 0x0b, 0xc2, 0xbf, 0xfd, 0x90, 0x8a, 0x63, 0x12, 0xe8, + 0x1a, 0xcc, 0x33, 0xdb, 0xed, 0xf4, 0x1d, 0x93, 0x0a, 0x42, 0x25, 0x2f, 0xfd, 0x5c, 0x51, 0x7e, + 0xce, 0xef, 0xc7, 0x78, 0x38, 0x21, 0x29, 0x2c, 0x59, 0x26, 0x27, 0x1d, 0x8f, 0xda, 0x84, 0x55, + 0xe6, 0x22, 0x4b, 0xdb, 0x21, 0x15, 0xc7, 0x24, 0xd0, 0xeb, 0x90, 0x93, 0x91, 0xaf, 0x14, 0xa4, + 0x89, 0xb2, 0x32, 0x91, 0x93, 0xdb, 0x82, 0x7d, 0x1e, 0x7a, 0x13, 0xe6, 0xd4, 0xa9, 0xa9, 0x14, + 0xa5, 0xd8, 0xa2, 0x12, 0x9b, 0x0b, 0xd2, 0x3a, 0xe0, 0xeb, 0x7f, 0xd4, 0x60, 0x31, 0xb6, 0x7f, + 0x32, 0x57, 0xae, 0xc1, 0x7c, 0x27, 0x76, 0x52, 0xd4, 0x5e, 0x86, 0xab, 0x89, 0x9f, 0x22, 0x9c, + 0x90, 0x44, 0x04, 0x8a, 0x54, 0x21, 0x05, 0x15, 0x61, 0x23, 0x75, 0xa2, 0x05, 0x3e, 0x44, 0x96, + 0x62, 0x44, 0x86, 0x23, 0x64, 
0xfd, 0x9f, 0x9a, 0x4c, 0xba, 0xa0, 0x46, 0xa0, 0xf5, 0x58, 0x1d, + 0xd2, 0x64, 0x08, 0xe7, 0x27, 0xd4, 0x90, 0x4b, 0x0e, 0x6f, 0xe6, 0xff, 0xe2, 0xf0, 0x5e, 0x2f, 0xfc, 0xe6, 0xc3, 0xda, 0xcc, 0x07, 0xff, 0x58, 0x9b, 0xd1, 0x3f, 0xc9, 0x40, 0xb9, 0x49, 0x1c, 0xc2, 0xc9, 0xdd, 0x1e, 0x97, 0x2b, 0xb8, 0x09, 0xa8, 0x43, 0x4d, 0x8b, 0xb4, 0x08, 0xb5, 0xbd, - 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x11, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, - 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0x22, 0x71, - 0xde, 0x4e, 0xb7, 0xf6, 0x56, 0x5c, 0xd5, 0x58, 0x1e, 0x0e, 0x6a, 0xe5, 0x04, 0x09, 0x27, 0xc1, - 0xd1, 0x77, 0x61, 0xc9, 0xa3, 0xbd, 0x23, 0xd3, 0x6d, 0x92, 0x1e, 0x71, 0xdb, 0xc4, 0xe5, 0x4c, - 0x26, 0x73, 0xc1, 0x58, 0x11, 0x65, 0xf7, 0xee, 0x08, 0x0f, 0x8f, 0x49, 0xa3, 0x07, 0xb0, 0xdc, - 0xa3, 0x5e, 0xcf, 0xec, 0x98, 0x02, 0xb1, 0xe5, 0x39, 0xb6, 0x75, 0x26, 0x93, 0xbd, 0x68, 0x5c, - 0x1d, 0x0e, 0x6a, 0xcb, 0xad, 0x51, 0xe6, 0xf9, 0xa0, 0xf6, 0x8a, 0xdc, 0x3a, 0x41, 0x89, 0x98, - 0x78, 0x1c, 0x46, 0xdf, 0x85, 0x42, 0xb3, 0x4f, 0x25, 0x05, 0x7d, 0x1b, 0x0a, 0x6d, 0xf5, 0x5b, - 0xed, 0xea, 0x6b, 0xc1, 0x9d, 0x14, 0xc8, 0x9c, 0x0f, 0x6a, 0x65, 0x71, 0xf5, 0xd6, 0x03, 0x02, - 0x0e, 0x55, 0xf4, 0x87, 0x50, 0xde, 0x39, 0xed, 0x79, 0x94, 0x07, 0xe7, 0xf5, 0x06, 0xe4, 0x89, - 0x24, 0x48, 0xb4, 0x42, 0x54, 0x48, 0x7d, 0x31, 0xac, 0xb8, 0x22, 0xb1, 0xc9, 0xa9, 0x69, 0x71, - 0x55, 0x11, 0xc3, 0xc4, 0xde, 0x11, 0x44, 0xec, 0xf3, 0xf4, 0x27, 0x1a, 0xc0, 0x2d, 0x12, 0x62, - 0x6f, 0xc1, 0x62, 0x90, 0x14, 0xc9, 0x5c, 0xfd, 0xbc, 0xd2, 0x5e, 0xc4, 0x49, 0x36, 0x1e, 0x95, - 0x47, 0x2d, 0x58, 0xb1, 0x5d, 0xcb, 0xe9, 0xb7, 0xc9, 0x3d, 0xd7, 0x76, 0x6d, 0x6e, 0x9b, 0x8e, - 0xfd, 0x93, 0xb0, 0x2e, 0x7f, 0x41, 0xe1, 0xac, 0xec, 0x5e, 0x20, 0x83, 0x2f, 0xd4, 0xd4, 0x1f, - 0x42, 0x51, 0x56, 0x08, 0x51, 0x9c, 0xa3, 0x72, 0xa5, 0xbd, 0xa0, 0x5c, 0x05, 0xd5, 0x3d, 0x33, - 0xa9, 0xba, 0xc7, 0x12, 0xc2, 0x81, 0xb2, 0xaf, 0x1b, 0x5c, 0x38, 0xa9, 0x2c, 0x5c, 0x85, 0x42, - 0xb0, 0x70, 0x65, 0x25, 0x6c, 0x34, 0x02, 0x20, 0x1c, 0x4a, 0xc4, 0xac, 0x1d, 0x41, 0xa2, 0xda, - 0xa5, 0x33, 0x16, 0xab, 0xbe, 0x99, 0x17, 0x57, 0xdf, 0x98, 0xa5, 0x9f, 0x41, 0x65, 0x52, 0x77, - 0xf2, 0x12, 0xf5, 0x38, 0xbd, 0x2b, 0xfa, 0xaf, 0x35, 0x58, 0x8a, 0x23, 0xa5, 0x3f, 0xbe, 0xf4, - 0x46, 0x2e, 0xbf, 0xc7, 0x63, 0x3b, 0xf2, 0x5b, 0x0d, 0x56, 0x12, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, - 0x70, 0x2a, 0x1e, 0x1c, 0xd9, 0x29, 0x82, 0xa3, 0x01, 0xa5, 0xdd, 0x30, 0xee, 0xe9, 0xe5, 0x9d, - 0x8f, 0xfe, 0x17, 0x0d, 0xe6, 0x63, 0x1a, 0x0c, 0x3d, 0x84, 0x39, 0x51, 0xdf, 0x6c, 0xb7, 0xa3, - 0xba, 0xb2, 0x94, 0x97, 0x65, 0x0c, 0x24, 0x5a, 0x57, 0xcb, 0x47, 0xc2, 0x01, 0x24, 0x6a, 0x41, - 0x9e, 0x12, 0xd6, 0x77, 0xb8, 0x2a, 0xed, 0x57, 0x53, 0x5e, 0x6b, 0xdc, 0xe4, 0x7d, 0x66, 0x80, - 0xa8, 0x51, 0x58, 0xea, 0x63, 0x85, 0xa3, 0xff, 0x3d, 0x03, 0xe5, 0xdb, 0xe6, 0x01, 0x71, 0xf6, - 0x89, 0x43, 0x2c, 0xee, 0x51, 0xf4, 0x53, 0x28, 0x75, 0x4d, 0x6e, 0x1d, 0x49, 0x6a, 0xd0, 0x5b, - 0x36, 0xd3, 0x19, 0x4a, 0x20, 0xd5, 0xf7, 0x22, 0x98, 0x1d, 0x97, 0xd3, 0x33, 0xe3, 0x15, 0xb5, - 0xb0, 0x52, 0x8c, 0x83, 0xe3, 0xd6, 0xe4, 0x83, 0x40, 0x7e, 0xef, 0x9c, 0xf6, 0xc4, 0x25, 0x3a, - 0xfd, 0x3b, 0x24, 0xe1, 0x02, 0x26, 0xef, 0xf7, 0x6d, 0x4a, 0xba, 0xc4, 0xe5, 0xd1, 0x83, 0x60, - 0x6f, 0x04, 0x1f, 0x8f, 0x59, 0x5c, 0xbd, 0x01, 0x4b, 0xa3, 0xce, 0xa3, 0x25, 0xc8, 0x1e, 0x93, - 0x33, 0x3f, 0x16, 0xb0, 0xf8, 0x89, 0x56, 0x20, 0x77, 0x62, 0x3a, 0x7d, 0x55, 0x7f, 0xb0, 0xff, - 0x71, 0x3d, 0x73, 0x4d, 0xd3, 0x7f, 0xaf, 0x41, 0x65, 0x92, 
0x23, 0xe8, 0x8b, 0x31, 0x20, 0xa3, - 0xa4, 0xbc, 0xca, 0xbe, 0x4b, 0xce, 0x7c, 0xd4, 0x1d, 0x28, 0x78, 0x3d, 0xf1, 0x84, 0xf3, 0xa8, - 0x8a, 0xf3, 0x37, 0x83, 0xd8, 0xbd, 0xab, 0xe8, 0xe7, 0x83, 0xda, 0x95, 0x04, 0x7c, 0xc0, 0xc0, - 0xa1, 0x2a, 0xd2, 0x21, 0x2f, 0xfd, 0x11, 0x97, 0xb2, 0x68, 0x9f, 0xe4, 0xe1, 0xdf, 0x97, 0x14, - 0xac, 0x38, 0xfa, 0x9f, 0x34, 0x98, 0x95, 0xed, 0xe1, 0x43, 0x28, 0x88, 0xfd, 0x6b, 0x9b, 0xdc, - 0x94, 0x7e, 0xa5, 0x7e, 0x4c, 0x08, 0xed, 0x3d, 0xc2, 0xcd, 0x28, 0xbf, 0x02, 0x0a, 0x0e, 0x11, - 0x11, 0x86, 0x9c, 0xcd, 0x49, 0x37, 0x38, 0xc8, 0xb7, 0x26, 0x42, 0xab, 0xf7, 0x6f, 0x1d, 0x9b, - 0x8f, 0x76, 0x4e, 0x39, 0x71, 0xc5, 0x61, 0x44, 0xc5, 0x60, 0x57, 0x60, 0x60, 0x1f, 0x4a, 0xff, - 0x83, 0x06, 0xa1, 0x29, 0x91, 0xee, 0x8c, 0x38, 0x87, 0xb7, 0x6d, 0xf7, 0x58, 0x6d, 0x6b, 0xe8, - 0xce, 0xbe, 0xa2, 0xe3, 0x50, 0xe2, 0xa2, 0x2b, 0x36, 0x33, 0xe5, 0x15, 0x7b, 0x15, 0x0a, 0x96, - 0xe7, 0x72, 0xdb, 0xed, 0x8f, 0xd5, 0x97, 0x6d, 0x45, 0xc7, 0xa1, 0x84, 0xfe, 0x2c, 0x0b, 0x25, - 0xe1, 0x6b, 0x70, 0xc7, 0x7f, 0x13, 0xca, 0x4e, 0xfc, 0xf4, 0x94, 0xcf, 0x57, 0x14, 0x44, 0x32, - 0x1f, 0x71, 0x52, 0x56, 0x28, 0x1f, 0xda, 0xc4, 0x69, 0x87, 0xca, 0x99, 0xa4, 0xf2, 0xcd, 0x38, - 0x13, 0x27, 0x65, 0x45, 0x9d, 0x7d, 0x24, 0xe2, 0x5a, 0x35, 0x6a, 0xe1, 0xd6, 0x7e, 0x4f, 0x10, - 0xb1, 0xcf, 0xbb, 0x68, 0x7f, 0x66, 0xa7, 0xdc, 0x9f, 0xeb, 0xb0, 0x20, 0x0e, 0xd2, 0xeb, 0xf3, - 0xa0, 0x9b, 0xcd, 0xc9, 0xbe, 0x0b, 0x0d, 0x07, 0xb5, 0x85, 0xf7, 0x12, 0x1c, 0x3c, 0x22, 0x39, - 0xb1, 0x7d, 0xc9, 0x7f, 0xda, 0xf6, 0x45, 0xac, 0xda, 0xb1, 0xbb, 0x36, 0xaf, 0xcc, 0x49, 0x27, - 0xc2, 0x55, 0xdf, 0x16, 0x44, 0xec, 0xf3, 0x12, 0x47, 0x5a, 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2, - 0x9e, 0x6d, 0x51, 0x4f, 0xac, 0x45, 0x5c, 0x4c, 0x2c, 0xd1, 0xb4, 0x87, 0x05, 0x3c, 0x58, 0x63, - 0xc0, 0x17, 0xae, 0xb8, 0xa6, 0xeb, 0xf9, 0xad, 0x79, 0x2e, 0x72, 0xe5, 0x8e, 0x20, 0x62, 0x9f, - 0x77, 0x7d, 0x45, 0xdc, 0x47, 0xbf, 0x7c, 0x52, 0x9b, 0x79, 0xfc, 0xa4, 0x36, 0xf3, 0xe1, 0x13, - 0x75, 0x37, 0xfd, 0x0b, 0x00, 0xee, 0x1e, 0xfc, 0x98, 0x58, 0x7e, 0xcc, 0x5f, 0xfe, 0x2a, 0x17, - 0x3d, 0x86, 0x1a, 0x06, 0xc9, 0x17, 0x6c, 0x66, 0xa4, 0xc7, 0x88, 0xf1, 0x70, 0x42, 0x12, 0x35, - 0xa0, 0x18, 0xbe, 0xd4, 0x55, 0x7c, 0x2f, 0x2b, 0xb5, 0x62, 0xf8, 0x9c, 0xc7, 0x91, 0x4c, 0x22, - 0x01, 0x67, 0x2f, 0x4d, 0x40, 0x03, 0xb2, 0x7d, 0xbb, 0x2d, 0x43, 0xa2, 0x68, 0x7c, 0x35, 0x28, - 0x80, 0xf7, 0x76, 0x9b, 0xe7, 0x83, 0xda, 0x6b, 0x93, 0x66, 0x5c, 0xfc, 0xac, 0x47, 0x58, 0xfd, - 0xde, 0x6e, 0x13, 0x0b, 0xe5, 0x8b, 0x82, 0x34, 0x3f, 0x65, 0x90, 0x6e, 0x02, 0xa8, 0x55, 0x0b, - 0x6d, 0x3f, 0x36, 0xc2, 0xa9, 0xc5, 0xad, 0x90, 0x83, 0x63, 0x52, 0x88, 0xc1, 0xb2, 0x45, 0x89, - 0xfc, 0x2d, 0x8e, 0x9e, 0x71, 0xb3, 0xeb, 0xbf, 0xdb, 0x4b, 0x9b, 0x5f, 0x4e, 0x57, 0x31, 0x85, - 0x9a, 0xf1, 0xaa, 0x32, 0xb3, 0xbc, 0x3d, 0x0a, 0x86, 0xc7, 0xf1, 0x91, 0x07, 0xcb, 0x6d, 0xf5, - 0xea, 0x89, 0x8c, 0x16, 0xa7, 0x36, 0x7a, 0x45, 0x18, 0x6c, 0x8e, 0x02, 0xe1, 0x71, 0x6c, 0xf4, - 0x43, 0x58, 0x0d, 0x88, 0xe3, 0x4f, 0xcf, 0x0a, 0xc8, 0x9d, 0xaa, 0x8a, 0xc7, 0x70, 0x73, 0xa2, - 0x14, 0x7e, 0x01, 0x02, 0x6a, 0x43, 0xde, 0xf1, 0xbb, 0x8b, 0x92, 0xbc, 0x11, 0xbe, 0x95, 0x6e, - 0x15, 0x51, 0xf4, 0xd7, 0xe3, 0x5d, 0x45, 0xf8, 0xfc, 0x52, 0x0d, 0x85, 0xc2, 0x46, 0xa7, 0x50, - 0x32, 0x5d, 0xd7, 0xe3, 0xa6, 0xff, 0x18, 0x9e, 0x97, 0xa6, 0xb6, 0xa6, 0x36, 0xb5, 0x15, 0x61, - 0x8c, 0x74, 0x31, 0x31, 0x0e, 0x8e, 0x9b, 0x42, 0x8f, 0x60, 0xd1, 0x7b, 0xe4, 0x12, 0x8a, 0xc9, - 0x21, 0xa1, 0xc4, 0xb5, 0x08, 0xab, 0x94, 0xa5, 0xf5, 0xaf, 0xa5, 0xb4, 0x9e, 0x50, 
0x8e, 0x42, - 0x3a, 0x49, 0x67, 0x78, 0xd4, 0x0a, 0xaa, 0x03, 0x1c, 0xda, 0xae, 0xea, 0x45, 0x2b, 0x0b, 0xd1, - 0xe8, 0xe9, 0x66, 0x48, 0xc5, 0x31, 0x09, 0xf4, 0x75, 0x28, 0x59, 0x4e, 0x9f, 0x71, 0xe2, 0xcf, - 0xb8, 0x16, 0x65, 0x06, 0x85, 0xeb, 0xdb, 0x8e, 0x58, 0x38, 0x2e, 0x87, 0x8e, 0x60, 0xde, 0x8e, - 0x35, 0xbd, 0x95, 0x25, 0x19, 0x8b, 0x9b, 0x53, 0x77, 0xba, 0xcc, 0x58, 0x12, 0x95, 0x28, 0x4e, - 0xc1, 0x09, 0xe4, 0xd5, 0x6f, 0x40, 0xe9, 0x53, 0xf6, 0x60, 0xa2, 0x87, 0x1b, 0x3d, 0xba, 0xa9, - 0x7a, 0xb8, 0xbf, 0x66, 0x60, 0x21, 0xb9, 0xe1, 0xe1, 0x5b, 0x47, 0x9b, 0x38, 0xb3, 0x0c, 0xaa, - 0x72, 0x76, 0x62, 0x55, 0x56, 0xc5, 0x6f, 0xf6, 0x65, 0x8a, 0xdf, 0x26, 0x80, 0xd9, 0xb3, 0x83, - 0xba, 0xe7, 0xd7, 0xd1, 0xb0, 0x72, 0x45, 0x53, 0x34, 0x1c, 0x93, 0x92, 0x53, 0x49, 0xcf, 0xe5, - 0xd4, 0x73, 0x1c, 0x42, 0xd5, 0x65, 0xea, 0x4f, 0x25, 0x43, 0x2a, 0x8e, 0x49, 0xa0, 0x9b, 0x80, - 0x0e, 0x1c, 0xcf, 0x3a, 0x96, 0x5b, 0x10, 0xe4, 0xb9, 0xac, 0x92, 0x05, 0x7f, 0x28, 0x65, 0x8c, - 0x71, 0xf1, 0x05, 0x1a, 0xfa, 0x5d, 0x48, 0x8e, 0x91, 0xd0, 0x0d, 0x7f, 0x03, 0xb4, 0x70, 0xce, - 0x33, 0xdd, 0xe2, 0xf5, 0xab, 0x50, 0xc4, 0x9e, 0xc7, 0x5b, 0x26, 0x3f, 0x62, 0xa8, 0x06, 0xb9, - 0x9e, 0xf8, 0xa1, 0x66, 0x84, 0x72, 0xec, 0x2b, 0x39, 0xd8, 0xa7, 0xeb, 0xbf, 0xd2, 0xe0, 0xd5, - 0x89, 0x23, 0x3b, 0xb1, 0x91, 0x56, 0xf8, 0xa5, 0x5c, 0x0a, 0x37, 0x32, 0x92, 0xc3, 0x31, 0x29, - 0xd1, 0x80, 0x25, 0xe6, 0x7c, 0xa3, 0x0d, 0x58, 0xc2, 0x1a, 0x4e, 0xca, 0xea, 0xff, 0xce, 0x40, - 0xde, 0x7f, 0x8d, 0xfd, 0x97, 0x7b, 0xee, 0x37, 0x20, 0xcf, 0xa4, 0x1d, 0xe5, 0x5e, 0x58, 0x24, - 0x7d, 0xeb, 0x58, 0x71, 0x45, 0xef, 0xd2, 0x25, 0x8c, 0x99, 0x9d, 0x20, 0x66, 0xc3, 0xde, 0x65, - 0xcf, 0x27, 0xe3, 0x80, 0x8f, 0xde, 0x11, 0x8f, 0x4f, 0x93, 0x85, 0xed, 0x60, 0x35, 0x80, 0xc4, - 0x92, 0x7a, 0x3e, 0xa8, 0xcd, 0x2b, 0x70, 0xf9, 0x8d, 0x95, 0x34, 0x7a, 0x00, 0x73, 0x6d, 0xc2, - 0x4d, 0xdb, 0xf1, 0xbb, 0xc0, 0xd4, 0x03, 0x49, 0x1f, 0xac, 0xe9, 0xab, 0x1a, 0x25, 0xe1, 0x93, - 0xfa, 0xc0, 0x01, 0xa0, 0xc8, 0x37, 0xcb, 0x6b, 0xfb, 0xd3, 0xf9, 0x5c, 0x94, 0x6f, 0xdb, 0x5e, - 0x9b, 0x60, 0xc9, 0xd1, 0x1f, 0x6b, 0x50, 0xf2, 0x91, 0xb6, 0xcd, 0x3e, 0x23, 0x68, 0x23, 0x5c, - 0x85, 0x7f, 0xdc, 0xc1, 0x55, 0x3c, 0xfb, 0xde, 0x59, 0x8f, 0x9c, 0x0f, 0x6a, 0x45, 0x29, 0x26, - 0x3e, 0xc2, 0x05, 0xc4, 0xf6, 0x28, 0x73, 0xc9, 0x1e, 0xbd, 0x0e, 0x39, 0xd9, 0x71, 0xab, 0xcd, - 0x0c, 0xfb, 0x3b, 0xd9, 0x95, 0x63, 0x9f, 0xa7, 0x7f, 0x9c, 0x81, 0x72, 0x62, 0x71, 0x29, 0x9a, - 0xb9, 0x70, 0x42, 0x92, 0x49, 0x31, 0x75, 0x9b, 0xfc, 0x3f, 0x95, 0xef, 0x43, 0xde, 0x12, 0xeb, - 0x0b, 0xfe, 0xa9, 0xb5, 0x31, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x22, 0x49, 0x7e, 0x32, 0xac, 0x00, - 0xd1, 0x2d, 0x58, 0xa6, 0x84, 0xd3, 0xb3, 0xad, 0x43, 0x4e, 0x68, 0xbc, 0xed, 0xcf, 0x45, 0xed, - 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x41, 0x85, 0xcc, 0xbf, 0x44, 0x85, 0xd4, 0x1d, 0x98, 0xfd, - 0x1f, 0xb6, 0xe6, 0x3f, 0x80, 0x62, 0xd4, 0x3c, 0x7d, 0xc6, 0x26, 0xf5, 0x1f, 0x41, 0x41, 0x44, - 0x63, 0xd0, 0xf4, 0x5f, 0x72, 0x01, 0x25, 0xaf, 0x86, 0x4c, 0x9a, 0xab, 0x41, 0xdf, 0x04, 0xff, - 0x5f, 0x65, 0xa2, 0x9a, 0xfa, 0x0f, 0xf5, 0x58, 0x35, 0x8d, 0xbf, 0xba, 0x63, 0x93, 0xb2, 0x5f, - 0x68, 0x00, 0xf2, 0xd5, 0xb8, 0x73, 0x42, 0x5c, 0x2e, 0x1c, 0x13, 0x27, 0x30, 0xea, 0x98, 0x4c, - 0x23, 0xc9, 0x41, 0xf7, 0x20, 0xef, 0xc9, 0xa6, 0x4a, 0x8d, 0xae, 0xa6, 0x9c, 0x02, 0x84, 0x51, - 0xe7, 0x77, 0x66, 0x58, 0x81, 0x19, 0xeb, 0x4f, 0x9f, 0x57, 0x67, 0x9e, 0x3d, 0xaf, 0xce, 0x7c, - 0xf4, 0xbc, 0x3a, 0xf3, 0xc1, 0xb0, 0xaa, 0x3d, 0x1d, 0x56, 0xb5, 0x67, 0xc3, 0xaa, 0xf6, 0xd1, - 0xb0, 0xaa, 
0x7d, 0x3c, 0xac, 0x6a, 0x8f, 0x3f, 0xa9, 0xce, 0x3c, 0xc8, 0x9c, 0x6c, 0xfc, 0x27, - 0x00, 0x00, 0xff, 0xff, 0x66, 0xe7, 0x2a, 0x84, 0x4b, 0x20, 0x00, 0x00, + 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x16, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, + 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0xe2, 0xe0, + 0xbc, 0x9d, 0x6e, 0xed, 0xad, 0xb8, 0xaa, 0xb1, 0x3c, 0x1c, 0xd4, 0xca, 0x09, 0x12, 0x4e, 0x82, + 0xa3, 0xef, 0xc2, 0x92, 0x47, 0x7b, 0x47, 0xa6, 0xdb, 0x24, 0x3d, 0xe2, 0xb6, 0x89, 0xcb, 0x99, + 0x3c, 0xcc, 0x05, 0x63, 0x45, 0x94, 0xdd, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x72, + 0x8f, 0x7a, 0x3d, 0xb3, 0x63, 0x0a, 0xc4, 0x96, 0xe7, 0xd8, 0xd6, 0x99, 0x3c, 0xec, 0x45, 0xe3, + 0xea, 0x70, 0x50, 0x5b, 0x6e, 0x8d, 0x32, 0xcf, 0x07, 0xb5, 0x57, 0x64, 0xe8, 0x04, 0x25, 0x62, + 0xe2, 0x71, 0x18, 0x7d, 0x17, 0x0a, 0xcd, 0x3e, 0x95, 0x14, 0xf4, 0x6d, 0x28, 0xb4, 0xd5, 0x6f, + 0x15, 0xd5, 0xd7, 0x82, 0x3b, 0x29, 0x90, 0x39, 0x1f, 0xd4, 0xca, 0xe2, 0xea, 0xad, 0x07, 0x04, + 0x1c, 0xaa, 0xe8, 0x0f, 0xa1, 0xbc, 0x73, 0xda, 0xf3, 0x28, 0x0f, 0xf6, 0xeb, 0x0d, 0xc8, 0x13, + 0x49, 0x90, 0x68, 0x85, 0xa8, 0x90, 0xfa, 0x62, 0x58, 0x71, 0xc5, 0xc1, 0x26, 0xa7, 0xa6, 0xc5, + 0x55, 0x45, 0x0c, 0x0f, 0xf6, 0x8e, 0x20, 0x62, 0x9f, 0xa7, 0x3f, 0xd1, 0x00, 0x6e, 0x91, 0x10, + 0x7b, 0x0b, 0x16, 0x83, 0x43, 0x91, 0x3c, 0xab, 0x9f, 0x57, 0xda, 0x8b, 0x38, 0xc9, 0xc6, 0xa3, + 0xf2, 0xa8, 0x05, 0x2b, 0xb6, 0x6b, 0x39, 0xfd, 0x36, 0xb9, 0xe7, 0xda, 0xae, 0xcd, 0x6d, 0xd3, + 0xb1, 0x7f, 0x12, 0xd6, 0xe5, 0x2f, 0x28, 0x9c, 0x95, 0xdd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa, + 0x43, 0x28, 0xca, 0x0a, 0x21, 0x8a, 0x73, 0x54, 0xae, 0xb4, 0x17, 0x94, 0xab, 0xa0, 0xba, 0x67, + 0x26, 0x55, 0xf7, 0xd8, 0x81, 0x70, 0xa0, 0xec, 0xeb, 0x06, 0x17, 0x4e, 0x2a, 0x0b, 0x57, 0xa1, + 0x10, 0x2c, 0x5c, 0x59, 0x09, 0x1b, 0x8d, 0x00, 0x08, 0x87, 0x12, 0x31, 0x6b, 0x47, 0x90, 0xa8, + 0x76, 0xe9, 0x8c, 0xc5, 0xaa, 0x6f, 0xe6, 0xc5, 0xd5, 0x37, 0x66, 0xe9, 0x67, 0x50, 0x99, 0xd4, + 0x9d, 0xbc, 0x44, 0x3d, 0x4e, 0xef, 0x8a, 0xfe, 0x6b, 0x0d, 0x96, 0xe2, 0x48, 0xe9, 0xb7, 0x2f, + 0xbd, 0x91, 0xcb, 0xef, 0xf1, 0x58, 0x44, 0x7e, 0xab, 0xc1, 0x4a, 0x62, 0x69, 0x53, 0xed, 0xf8, + 0x14, 0x4e, 0xc5, 0x93, 0x23, 0x3b, 0x45, 0x72, 0x34, 0xa0, 0xb4, 0x1b, 0xe6, 0x3d, 0xbd, 0xbc, + 0xf3, 0xd1, 0xff, 0xa2, 0xc1, 0x7c, 0x4c, 0x83, 0xa1, 0x87, 0x30, 0x27, 0xea, 0x9b, 0xed, 0x76, + 0x54, 0x57, 0x96, 0xf2, 0xb2, 0x8c, 0x81, 0x44, 0xeb, 0x6a, 0xf9, 0x48, 0x38, 0x80, 0x44, 0x2d, + 0xc8, 0x53, 0xc2, 0xfa, 0x0e, 0x57, 0xa5, 0xfd, 0x6a, 0xca, 0x6b, 0x8d, 0x9b, 0xbc, 0xcf, 0x0c, + 0x10, 0x35, 0x0a, 0x4b, 0x7d, 0xac, 0x70, 0xf4, 0xbf, 0x67, 0xa0, 0x7c, 0xdb, 0x3c, 0x20, 0xce, + 0x3e, 0x71, 0x88, 0xc5, 0x3d, 0x8a, 0x7e, 0x0a, 0xa5, 0xae, 0xc9, 0xad, 0x23, 0x49, 0x0d, 0x7a, + 0xcb, 0x66, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x5e, 0x04, 0xb3, 0xe3, 0x72, 0x7a, 0x66, 0xbc, 0xa2, + 0x16, 0x56, 0x8a, 0x71, 0x70, 0xdc, 0x9a, 0x7c, 0x10, 0xc8, 0xef, 0x9d, 0xd3, 0x9e, 0xb8, 0x44, + 0xa7, 0x7f, 0x87, 0x24, 0x5c, 0xc0, 0xe4, 0xfd, 0xbe, 0x4d, 0x49, 0x97, 0xb8, 0x3c, 0x7a, 0x10, + 0xec, 0x8d, 0xe0, 0xe3, 0x31, 0x8b, 0xab, 0x37, 0x60, 0x69, 0xd4, 0x79, 0xb4, 0x04, 0xd9, 0x63, + 0x72, 0xe6, 0xe7, 0x02, 0x16, 0x3f, 0xd1, 0x0a, 0xe4, 0x4e, 0x4c, 0xa7, 0xaf, 0xea, 0x0f, 0xf6, + 0x3f, 0xae, 0x67, 0xae, 0x69, 0xfa, 0xef, 0x35, 0xa8, 0x4c, 0x72, 0x04, 0x7d, 0x31, 0x06, 0x64, + 0x94, 0x94, 0x57, 0xd9, 0x77, 0xc9, 0x99, 0x8f, 0xba, 0x03, 0x05, 0xaf, 0x27, 0x9e, 0x70, 0x1e, + 0x55, 0x79, 0xfe, 0x66, 0x90, 0xbb, 0x77, 0x15, 0xfd, 0x7c, 
0x50, 0xbb, 0x92, 0x80, 0x0f, 0x18, + 0x38, 0x54, 0x45, 0x3a, 0xe4, 0xa5, 0x3f, 0xe2, 0x52, 0x16, 0xed, 0x93, 0xdc, 0xfc, 0xfb, 0x92, + 0x82, 0x15, 0x47, 0xff, 0x93, 0x06, 0xb3, 0xb2, 0x3d, 0x7c, 0x08, 0x05, 0x11, 0xbf, 0xb6, 0xc9, + 0x4d, 0xe9, 0x57, 0xea, 0xc7, 0x84, 0xd0, 0xde, 0x23, 0xdc, 0x8c, 0xce, 0x57, 0x40, 0xc1, 0x21, + 0x22, 0xc2, 0x90, 0xb3, 0x39, 0xe9, 0x06, 0x1b, 0xf9, 0xd6, 0x44, 0x68, 0xf5, 0xfe, 0xad, 0x63, + 0xf3, 0xd1, 0xce, 0x29, 0x27, 0xae, 0xd8, 0x8c, 0xa8, 0x18, 0xec, 0x0a, 0x0c, 0xec, 0x43, 0xe9, + 0x7f, 0xd0, 0x20, 0x34, 0x25, 0x8e, 0x3b, 0x23, 0xce, 0xe1, 0x6d, 0xdb, 0x3d, 0x56, 0x61, 0x0d, + 0xdd, 0xd9, 0x57, 0x74, 0x1c, 0x4a, 0x5c, 0x74, 0xc5, 0x66, 0xa6, 0xbc, 0x62, 0xaf, 0x42, 0xc1, + 0xf2, 0x5c, 0x6e, 0xbb, 0xfd, 0xb1, 0xfa, 0xb2, 0xad, 0xe8, 0x38, 0x94, 0xd0, 0x9f, 0x65, 0xa1, + 0x24, 0x7c, 0x0d, 0xee, 0xf8, 0x6f, 0x42, 0xd9, 0x89, 0xef, 0x9e, 0xf2, 0xf9, 0x8a, 0x82, 0x48, + 0x9e, 0x47, 0x9c, 0x94, 0x15, 0xca, 0x87, 0x36, 0x71, 0xda, 0xa1, 0x72, 0x26, 0xa9, 0x7c, 0x33, + 0xce, 0xc4, 0x49, 0x59, 0x51, 0x67, 0x1f, 0x89, 0xbc, 0x56, 0x8d, 0x5a, 0x18, 0xda, 0xef, 0x09, + 0x22, 0xf6, 0x79, 0x17, 0xc5, 0x67, 0x76, 0xca, 0xf8, 0x5c, 0x87, 0x05, 0xb1, 0x91, 0x5e, 0x9f, + 0x07, 0xdd, 0x6c, 0x4e, 0xf6, 0x5d, 0x68, 0x38, 0xa8, 0x2d, 0xbc, 0x97, 0xe0, 0xe0, 0x11, 0xc9, + 0x89, 0xed, 0x4b, 0xfe, 0xd3, 0xb6, 0x2f, 0x62, 0xd5, 0x8e, 0xdd, 0xb5, 0x79, 0x65, 0x4e, 0x3a, + 0x11, 0xae, 0xfa, 0xb6, 0x20, 0x62, 0x9f, 0x97, 0xd8, 0xd2, 0xc2, 0xa5, 0x5b, 0xfa, 0x3e, 0x14, + 0xf7, 0x6c, 0x8b, 0x7a, 0x62, 0x2d, 0xe2, 0x62, 0x62, 0x89, 0xa6, 0x3d, 0x2c, 0xe0, 0xc1, 0x1a, + 0x03, 0xbe, 0x70, 0xc5, 0x35, 0x5d, 0xcf, 0x6f, 0xcd, 0x73, 0x91, 0x2b, 0x77, 0x04, 0x11, 0xfb, + 0xbc, 0xeb, 0x2b, 0xe2, 0x3e, 0xfa, 0xe5, 0x93, 0xda, 0xcc, 0xe3, 0x27, 0xb5, 0x99, 0x0f, 0x9f, + 0xa8, 0xbb, 0xe9, 0x5f, 0x00, 0x70, 0xf7, 0xe0, 0xc7, 0xc4, 0xf2, 0x73, 0xfe, 0xf2, 0x57, 0xb9, + 0xe8, 0x31, 0xd4, 0x30, 0x48, 0xbe, 0x60, 0x33, 0x23, 0x3d, 0x46, 0x8c, 0x87, 0x13, 0x92, 0xa8, + 0x01, 0xc5, 0xf0, 0xa5, 0xae, 0xf2, 0x7b, 0x59, 0xa9, 0x15, 0xc3, 0xe7, 0x3c, 0x8e, 0x64, 0x12, + 0x07, 0x70, 0xf6, 0xd2, 0x03, 0x68, 0x40, 0xb6, 0x6f, 0xb7, 0x65, 0x4a, 0x14, 0x8d, 0xaf, 0x06, + 0x05, 0xf0, 0xde, 0x6e, 0xf3, 0x7c, 0x50, 0x7b, 0x6d, 0xd2, 0x8c, 0x8b, 0x9f, 0xf5, 0x08, 0xab, + 0xdf, 0xdb, 0x6d, 0x62, 0xa1, 0x7c, 0x51, 0x92, 0xe6, 0xa7, 0x4c, 0xd2, 0x4d, 0x00, 0xb5, 0x6a, + 0xa1, 0xed, 0xe7, 0x46, 0x38, 0xb5, 0xb8, 0x15, 0x72, 0x70, 0x4c, 0x0a, 0x31, 0x58, 0xb6, 0x28, + 0x91, 0xbf, 0xc5, 0xd6, 0x33, 0x6e, 0x76, 0xfd, 0x77, 0x7b, 0x69, 0xf3, 0xcb, 0xe9, 0x2a, 0xa6, + 0x50, 0x33, 0x5e, 0x55, 0x66, 0x96, 0xb7, 0x47, 0xc1, 0xf0, 0x38, 0x3e, 0xf2, 0x60, 0xb9, 0xad, + 0x5e, 0x3d, 0x91, 0xd1, 0xe2, 0xd4, 0x46, 0xaf, 0x08, 0x83, 0xcd, 0x51, 0x20, 0x3c, 0x8e, 0x8d, + 0x7e, 0x08, 0xab, 0x01, 0x71, 0xfc, 0xe9, 0x59, 0x01, 0x19, 0xa9, 0xaa, 0x78, 0x0c, 0x37, 0x27, + 0x4a, 0xe1, 0x17, 0x20, 0xa0, 0x36, 0xe4, 0x1d, 0xbf, 0xbb, 0x28, 0xc9, 0x1b, 0xe1, 0x5b, 0xe9, + 0x56, 0x11, 0x65, 0x7f, 0x3d, 0xde, 0x55, 0x84, 0xcf, 0x2f, 0xd5, 0x50, 0x28, 0x6c, 0x74, 0x0a, + 0x25, 0xd3, 0x75, 0x3d, 0x6e, 0xfa, 0x8f, 0xe1, 0x79, 0x69, 0x6a, 0x6b, 0x6a, 0x53, 0x5b, 0x11, + 0xc6, 0x48, 0x17, 0x13, 0xe3, 0xe0, 0xb8, 0x29, 0xf4, 0x08, 0x16, 0xbd, 0x47, 0x2e, 0xa1, 0x98, + 0x1c, 0x12, 0x4a, 0x5c, 0x8b, 0xb0, 0x4a, 0x59, 0x5a, 0xff, 0x5a, 0x4a, 0xeb, 0x09, 0xe5, 0x28, + 0xa5, 0x93, 0x74, 0x86, 0x47, 0xad, 0xa0, 0x3a, 0xc0, 0xa1, 0xed, 0xaa, 0x5e, 0xb4, 0xb2, 0x10, + 0x8d, 0x9e, 0x6e, 0x86, 0x54, 0x1c, 0x93, 0x40, 0x5f, 0x87, 0x92, 0xe5, 0xf4, 0x19, 
0x27, 0xfe, + 0x8c, 0x6b, 0x51, 0x9e, 0xa0, 0x70, 0x7d, 0xdb, 0x11, 0x0b, 0xc7, 0xe5, 0xd0, 0x11, 0xcc, 0xdb, + 0xb1, 0xa6, 0xb7, 0xb2, 0x24, 0x73, 0x71, 0x73, 0xea, 0x4e, 0x97, 0x19, 0x4b, 0xa2, 0x12, 0xc5, + 0x29, 0x38, 0x81, 0xbc, 0xfa, 0x0d, 0x28, 0x7d, 0xca, 0x1e, 0x4c, 0xf4, 0x70, 0xa3, 0x5b, 0x37, + 0x55, 0x0f, 0xf7, 0xd7, 0x0c, 0x2c, 0x24, 0x03, 0x1e, 0xbe, 0x75, 0xb4, 0x89, 0x33, 0xcb, 0xa0, + 0x2a, 0x67, 0x27, 0x56, 0x65, 0x55, 0xfc, 0x66, 0x5f, 0xa6, 0xf8, 0x6d, 0x02, 0x98, 0x3d, 0x3b, + 0xa8, 0x7b, 0x7e, 0x1d, 0x0d, 0x2b, 0x57, 0x34, 0x45, 0xc3, 0x31, 0x29, 0x39, 0x95, 0xf4, 0x5c, + 0x4e, 0x3d, 0xc7, 0x21, 0x54, 0x5d, 0xa6, 0xfe, 0x54, 0x32, 0xa4, 0xe2, 0x98, 0x04, 0xba, 0x09, + 0xe8, 0xc0, 0xf1, 0xac, 0x63, 0x19, 0x82, 0xe0, 0x9c, 0xcb, 0x2a, 0x59, 0xf0, 0x87, 0x52, 0xc6, + 0x18, 0x17, 0x5f, 0xa0, 0xa1, 0xcf, 0x41, 0xae, 0x25, 0xda, 0x0a, 0xfd, 0x2e, 0x24, 0xe7, 0x49, + 0xe8, 0x86, 0x1f, 0x09, 0x2d, 0x1c, 0xf8, 0x4c, 0x17, 0x05, 0xfd, 0x2a, 0x14, 0xb1, 0xe7, 0xf1, + 0x96, 0xc9, 0x8f, 0x18, 0xaa, 0x41, 0xae, 0x27, 0x7e, 0xa8, 0x61, 0xa1, 0x9c, 0xff, 0x4a, 0x0e, + 0xf6, 0xe9, 0xfa, 0xaf, 0x34, 0x78, 0x75, 0xe2, 0xec, 0x4e, 0x44, 0xd4, 0x0a, 0xbf, 0x94, 0x4b, + 0x61, 0x44, 0x23, 0x39, 0x1c, 0x93, 0x12, 0x9d, 0x58, 0x62, 0xe0, 0x37, 0xda, 0x89, 0x25, 0xac, + 0xe1, 0xa4, 0xac, 0xfe, 0xef, 0x0c, 0xe4, 0xfd, 0x67, 0xd9, 0x7f, 0xb9, 0xf9, 0x7e, 0x03, 0xf2, + 0x4c, 0xda, 0x51, 0xee, 0x85, 0xd5, 0xd2, 0xb7, 0x8e, 0x15, 0x57, 0x34, 0x31, 0x5d, 0xc2, 0x98, + 0xd9, 0x09, 0x92, 0x37, 0x6c, 0x62, 0xf6, 0x7c, 0x32, 0x0e, 0xf8, 0xe8, 0x1d, 0xf1, 0x0a, 0x35, + 0x59, 0xd8, 0x17, 0x56, 0x03, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xda, 0xbc, 0x02, 0x97, 0xdf, 0x58, + 0x49, 0xa3, 0x07, 0x30, 0xd7, 0x26, 0xdc, 0xb4, 0x1d, 0xbf, 0x1d, 0x4c, 0x3d, 0x99, 0xf4, 0xc1, + 0x9a, 0xbe, 0xaa, 0x51, 0x12, 0x3e, 0xa9, 0x0f, 0x1c, 0x00, 0x8a, 0x83, 0x67, 0x79, 0x6d, 0x7f, + 0x4c, 0x9f, 0x8b, 0x0e, 0xde, 0xb6, 0xd7, 0x26, 0x58, 0x72, 0xf4, 0xc7, 0x1a, 0x94, 0x7c, 0xa4, + 0x6d, 0xb3, 0xcf, 0x08, 0xda, 0x08, 0x57, 0xe1, 0x6f, 0x77, 0x70, 0x27, 0xcf, 0xbe, 0x77, 0xd6, + 0x23, 0xe7, 0x83, 0x5a, 0x51, 0x8a, 0x89, 0x8f, 0x70, 0x01, 0xb1, 0x18, 0x65, 0x2e, 0x89, 0xd1, + 0xeb, 0x90, 0x93, 0xad, 0xb7, 0x0a, 0x66, 0xd8, 0xe8, 0xc9, 0xf6, 0x1c, 0xfb, 0x3c, 0xfd, 0xe3, + 0x0c, 0x94, 0x13, 0x8b, 0x4b, 0xd1, 0xd5, 0x85, 0xa3, 0x92, 0x4c, 0x8a, 0xf1, 0xdb, 0xe4, 0x7f, + 0xae, 0x7c, 0x1f, 0xf2, 0x96, 0x58, 0x5f, 0xf0, 0xdf, 0xad, 0x8d, 0x69, 0xb6, 0x42, 0x46, 0x26, + 0xca, 0x24, 0xf9, 0xc9, 0xb0, 0x02, 0x44, 0xb7, 0x60, 0x99, 0x12, 0x4e, 0xcf, 0xb6, 0x0e, 0x39, + 0xa1, 0xf1, 0xfe, 0x3f, 0x17, 0xf5, 0x3d, 0x78, 0x54, 0x00, 0x8f, 0xeb, 0x04, 0xa5, 0x32, 0xff, + 0x12, 0xa5, 0x52, 0x77, 0x60, 0xf6, 0x7f, 0xd8, 0xa3, 0xff, 0x00, 0x8a, 0x51, 0x17, 0xf5, 0x19, + 0x9b, 0xd4, 0x7f, 0x04, 0x05, 0x91, 0x8d, 0x41, 0xf7, 0x7f, 0xc9, 0x4d, 0x94, 0xbc, 0x23, 0x32, + 0x69, 0xee, 0x08, 0x7d, 0x13, 0xfc, 0xff, 0x99, 0x89, 0x6a, 0xea, 0xbf, 0xd8, 0x63, 0xd5, 0x34, + 0xfe, 0xfc, 0x8e, 0x8d, 0xcc, 0x7e, 0xa1, 0x01, 0xc8, 0xe7, 0xe3, 0xce, 0x09, 0x71, 0xb9, 0x70, + 0x4c, 0xec, 0xc0, 0xa8, 0x63, 0xf2, 0x18, 0x49, 0x0e, 0xba, 0x07, 0x79, 0x4f, 0x76, 0x57, 0x6a, + 0x86, 0x35, 0xe5, 0x38, 0x20, 0xcc, 0x3a, 0xbf, 0x45, 0xc3, 0x0a, 0xcc, 0x58, 0x7f, 0xfa, 0xbc, + 0x3a, 0xf3, 0xec, 0x79, 0x75, 0xe6, 0xa3, 0xe7, 0xd5, 0x99, 0x0f, 0x86, 0x55, 0xed, 0xe9, 0xb0, + 0xaa, 0x3d, 0x1b, 0x56, 0xb5, 0x8f, 0x86, 0x55, 0xed, 0xe3, 0x61, 0x55, 0x7b, 0xfc, 0x49, 0x75, + 0xe6, 0x41, 0xe6, 0x64, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xc5, 0x28, 0xb2, 0x54, + 0x20, 0x00, 
0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index a547d78d..ab23a071 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -149,6 +149,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -382,7 +386,7 @@ message ListOptions { // more results are available. Servers may choose not to support the limit argument and will return // all of the available results. If limit is specified and the continue field is empty, clients may // assume that no more results are available. This field is not supported if watch is true. - // + // // The server guarantees that the objects returned when using continue will be identical to issuing // a single list call without a limit - that is, no objects created, modified, or deleted after the // first request is issued will be included in any subsequent continued requests. This is sometimes @@ -510,15 +514,16 @@ message ObjectMeta { // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. 
// // Populated by the system when a graceful deletion is requested. @@ -616,6 +621,10 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +message Patch { +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. @@ -781,7 +790,7 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. -// +// // +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 6e449a43..b300d370 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -70,7 +70,6 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) AddConversionFuncs(scheme) RegisterDefaults(scheme) } @@ -90,6 +89,5 @@ func init() { ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) RegisterDefaults(scheme) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 13ae66c6..c8ee4e5d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -177,15 +177,16 @@ type ObjectMeta struct { // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. 
After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. // // Populated by the system when a graceful deletion is requested. @@ -445,6 +446,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 49d2de1e..5dbba4b0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -90,7 +90,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -214,7 +214,7 @@ var map_ObjectMeta = map[string]string{ "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. 
Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go new file mode 100644 index 00000000..8d035254 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -0,0 +1,453 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + gojson "encoding/json" + "errors" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +// NestedFieldCopy returns a deep copy of the value of a nested field. +// false is returned if the value is missing. +// nil, true is returned for a nil field. +func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + return runtime.DeepCopyJSONValue(val), true +} + +func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { + var val interface{} = obj + for _, field := range fields { + if m, ok := val.(map[string]interface{}); ok { + val, ok = m[field] + if !ok { + return nil, false + } + } else { + // Expected map[string]interface{}, got something else + return nil, false + } + } + return val, true +} + +// NestedString returns the string value of a nested field. +// Returns false if value is not found or is not a string. +func NestedString(obj map[string]interface{}, fields ...string) (string, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return "", false + } + s, ok := val.(string) + return s, ok +} + +// NestedBool returns the bool value of a nested field. +// Returns false if value is not found or is not a bool. +func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return false, false + } + b, ok := val.(bool) + return b, ok +} + +// NestedFloat64 returns the bool value of a nested field. +// Returns false if value is not found or is not a float64. 
+func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return 0, false + } + f, ok := val.(float64) + return f, ok +} + +// NestedInt64 returns the int64 value of a nested field. +// Returns false if value is not found or is not an int64. +func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return 0, false + } + i, ok := val.(int64) + return i, ok +} + +// NestedStringSlice returns a copy of []string value of a nested field. +// Returns false if value is not found, is not a []interface{} or contains non-string items in the slice. +func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + if m, ok := val.([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } else { + return nil, false + } + } + return strSlice, true + } + return nil, false +} + +// NestedSlice returns a deep copy of []interface{} value of a nested field. +// Returns false if value is not found or is not a []interface{}. +func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + if _, ok := val.([]interface{}); ok { + return runtime.DeepCopyJSONValue(val).([]interface{}), true + } + return nil, false +} + +// NestedStringMap returns a copy of map[string]string value of a nested field. +// Returns false if value is not found, is not a map[string]interface{} or contains non-string values in the map. +func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool) { + m, ok := nestedMapNoCopy(obj, fields...) + if !ok { + return nil, false + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else { + return nil, false + } + } + return strMap, true +} + +// NestedMap returns a deep copy of map[string]interface{} value of a nested field. +// Returns false if value is not found or is not a map[string]interface{}. +func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { + m, ok := nestedMapNoCopy(obj, fields...) + if !ok { + return nil, false + } + return runtime.DeepCopyJSON(m), true +} + +// nestedMapNoCopy returns a map[string]interface{} value of a nested field. +// Returns false if value is not found or is not a map[string]interface{}. +func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + m, ok := val.(map[string]interface{}) + return m, ok +} + +// SetNestedField sets the value of a nested field to a deep copy of the value provided. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) bool { + return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) 
+} + +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) bool { + m := obj + for _, field := range fields[:len(fields)-1] { + if val, ok := m[field]; ok { + if valMap, ok := val.(map[string]interface{}); ok { + m = valMap + } else { + return false + } + } else { + newVal := make(map[string]interface{}) + m[field] = newVal + m = newVal + } + } + m[fields[len(fields)-1]] = value + return true +} + +// SetNestedStringSlice sets the string slice value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) bool { + m := make([]interface{}, 0, len(value)) // convert []string into []interface{} + for _, v := range value { + m = append(m, v) + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedSlice sets the slice value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) bool { + return SetNestedField(obj, value, fields...) +} + +// SetNestedStringMap sets the map[string]string value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) bool { + m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} + for k, v := range value { + m[k] = v + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedMap sets the map[string]interface{} value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) bool { + return SetNestedField(obj, value, fields...) +} + +// RemoveNestedField removes the nested field from the obj. +func RemoveNestedField(obj map[string]interface{}, fields ...string) { + m := obj + for _, field := range fields[:len(fields)-1] { + if x, ok := m[field].(map[string]interface{}); ok { + m = x + } else { + return + } + } + delete(m, fields[len(fields)-1]) +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + val, ok := NestedString(obj, fields...) + if !ok { + return "" + } + return val +} + +func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. + var controllerPtr *bool + if controller, ok := NestedBool(v, "controller"); ok { + controllerPtr = &controller + } + var blockOwnerDeletionPtr *bool + if blockOwnerDeletion, ok := NestedBool(v, "blockOwnerDeletion"); ok { + blockOwnerDeletionPtr = &blockOwnerDeletion + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: types.UID(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. 
+var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + listObj := make(map[string]interface{}, len(t.Object)+1) + for k, v := range t.Object { // Make a shallow copy + listObj[k] = v + } + listObj["items"] = items + return json.NewEncoder(w).Encode(listObj) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. + unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = make([]Unstructured, 0, len(dList.Items)) + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. 
+ if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, *unstruct) + } + return nil +} + +// UnstructuredObjectConverter is an ObjectConverter for use with +// Unstructured objects. Since it has no schema or type information, +// it will only succeed for no-op conversions. This is provided as a +// sane implementation for APIs that require an object converter. +type UnstructuredObjectConverter struct{} + +func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*Unstructured) + if !ok { + return fmt.Errorf("input type %T in not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*Unstructured) + if !ok { + return fmt.Errorf("output type %T in not valid for unstructured conversion", out) + } + + // maybe deep copy the map? It is documented in the + // ObjectConverter interface that this function is not + // guaranteed to not mutate the input. Or maybe set the input + // object to nil. + unstructOut.Object = unstructIn.Object + return nil +} + +func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error? + return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + } + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 7213b7a1..36e769bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -18,20 +18,13 @@ package unstructured import ( "bytes" - gojson "encoding/json" "errors" "fmt" - "io" - "strings" - - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -54,39 +47,31 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} -var _ runtime.Unstructured = &UnstructuredList{} -func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } -func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } - -func (obj *Unstructured) IsUnstructuredObject() {} -func (obj *UnstructuredList) IsUnstructuredObject() {} +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } func (obj *Unstructured) IsList() bool { - if obj.Object != nil { - _, ok := obj.Object["items"] - return ok - } - return false -} -func (obj *UnstructuredList) IsList() bool { return true } - -func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { - if obj.Object == nil { - return fmt.Errorf("content is not a list") - } field, ok := obj.Object["items"] if !ok { - return 
fmt.Errorf("content is not a list") + return false + } + _, ok = field.([]interface{}) + return ok +} + +func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { + field, ok := obj.Object["items"] + if !ok { + return errors.New("content is not a list") } items, ok := field.([]interface{}) if !ok { - return nil + return fmt.Errorf("content is not a list: %T", field) } for _, item := range items { child, ok := item.(map[string]interface{}) if !ok { - return fmt.Errorf("items member is not an object") + return fmt.Errorf("items member is not an object: %T", child) } if err := fn(&Unstructured{Object: child}); err != nil { return err @@ -95,15 +80,6 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } -func (obj *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { - for i := range obj.Items { - if err := fn(&obj.Items[i]); err != nil { - return err - } - } - return nil -} - func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { obj.Object = make(map[string]interface{}) @@ -111,23 +87,8 @@ func (obj *Unstructured) UnstructuredContent() map[string]interface{} { return obj.Object } -// UnstructuredContent returns a map contain an overlay of the Items field onto -// the Object field. Items always overwrites overlay. Changing "items" in the -// returned object will affect items in the underlying Items field, but changing -// the "items" slice itself will have no effect. -// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows -// items to be changed. -func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { - out := obj.Object - if out == nil { - out = make(map[string]interface{}) - } - items := make([]interface{}, len(obj.Items)) - for i, item := range obj.Items { - items[i] = item.Object - } - out["items"] = items - return out +func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content } // MarshalJSON ensures that the unstructured object produces proper @@ -151,227 +112,61 @@ func (in *Unstructured) DeepCopy() *Unstructured { } out := new(Unstructured) *out = *in - out.Object = unstructured.DeepCopyJSON(in.Object) + out.Object = runtime.DeepCopyJSON(in.Object) return out } -func (in *UnstructuredList) DeepCopy() *UnstructuredList { - if in == nil { - return nil - } - out := new(UnstructuredList) - *out = *in - out.Object = unstructured.DeepCopyJSON(in.Object) - out.Items = make([]Unstructured, len(in.Items)) - for i := range in.Items { - in.Items[i].DeepCopyInto(&out.Items[i]) - } - return out -} - -func getNestedField(obj map[string]interface{}, fields ...string) interface{} { - var val interface{} = obj - for _, field := range fields { - if _, ok := val.(map[string]interface{}); !ok { - return nil - } - val = val.(map[string]interface{})[field] - } - return val -} - -func getNestedString(obj map[string]interface{}, fields ...string) string { - if str, ok := getNestedField(obj, fields...).(string); ok { - return str - } - return "" -} - -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - if str, ok := getNestedField(obj, fields...).(int64); ok { - return str - } - return 0 -} - -func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { - nested := getNestedField(obj, fields...) 
- switch n := nested.(type) { - case int64: - return &n - case *int64: - return n - default: - return nil - } -} - -func getNestedSlice(obj map[string]interface{}, fields ...string) []string { - if m, ok := getNestedField(obj, fields...).([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } - } - return strSlice - } - return nil -} - -func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { - if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { - strMap := make(map[string]string, len(m)) - for k, v := range m { - if str, ok := v.(string); ok { - strMap[k] = str - } - } - return strMap - } - return nil -} - -func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { - m := obj - if len(fields) > 1 { - for _, field := range fields[0 : len(fields)-1] { - if _, ok := m[field].(map[string]interface{}); !ok { - m[field] = make(map[string]interface{}) - } - m = m[field].(map[string]interface{}) - } - } - m[fields[len(fields)-1]] = value -} - -func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { - m := make([]interface{}, 0, len(value)) - for _, v := range value { - m = append(m, v) - } - setNestedField(obj, m, fields...) -} - -func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { - m := make(map[string]interface{}, len(value)) - for k, v := range value { - m[k] = v - } - setNestedField(obj, m, fields...) -} - func (u *Unstructured) setNestedField(value interface{}, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedField(u.Object, value, fields...) + SetNestedField(u.Object, value, fields...) } func (u *Unstructured) setNestedSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedSlice(u.Object, value, fields...) + SetNestedStringSlice(u.Object, value, fields...) } func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedMap(u.Object, value, fields...) -} - -func extractOwnerReference(src interface{}) metav1.OwnerReference { - v := src.(map[string]interface{}) - // though this field is a *bool, but when decoded from JSON, it's - // unmarshalled as bool. 
- var controllerPtr *bool - controller, ok := (getNestedField(v, "controller")).(bool) - if !ok { - controllerPtr = nil - } else { - controllerCopy := controller - controllerPtr = &controllerCopy - } - var blockOwnerDeletionPtr *bool - blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) - if !ok { - blockOwnerDeletionPtr = nil - } else { - blockOwnerDeletionCopy := blockOwnerDeletion - blockOwnerDeletionPtr = &blockOwnerDeletionCopy - } - return metav1.OwnerReference{ - Kind: getNestedString(v, "kind"), - Name: getNestedString(v, "name"), - APIVersion: getNestedString(v, "apiVersion"), - UID: (types.UID)(getNestedString(v, "uid")), - Controller: controllerPtr, - BlockOwnerDeletion: blockOwnerDeletionPtr, - } -} - -func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { - ret := make(map[string]interface{}) - setNestedField(ret, src.Kind, "kind") - setNestedField(ret, src.Name, "name") - setNestedField(ret, src.APIVersion, "apiVersion") - setNestedField(ret, string(src.UID), "uid") - // json.Unmarshal() extracts boolean json fields as bool, not as *bool and hence extractOwnerReference() - // expects bool or a missing field, not *bool. So if pointer is nil, fields are omitted from the ret object. - // If pointer is non-nil, they are set to the referenced value. - if src.Controller != nil { - setNestedField(ret, *src.Controller, "controller") - } - if src.BlockOwnerDeletion != nil { - setNestedField(ret, *src.BlockOwnerDeletion, "blockOwnerDeletion") - } - return ret -} - -func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { - field := getNestedField(object, "metadata", "ownerReferences") - if field == nil { - return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) - } - ownerReferences, ok := field.([]map[string]interface{}) - if ok { - return ownerReferences, nil - } - // TODO: This is hacky... - interfaces, ok := field.([]interface{}) - if !ok { - return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) - } - ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) - for i := 0; i < len(interfaces); i++ { - r, ok := interfaces[i].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) - } - ownerReferences = append(ownerReferences, r) - } - return ownerReferences, nil + SetNestedStringMap(u.Object, value, fields...) 
} func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { - original, err := getOwnerReferences(u.Object) - if err != nil { - glog.V(6).Info(err) + field, ok := nestedFieldNoCopy(u.Object, "metadata", "ownerReferences") + if !ok { + return nil + } + original, ok := field.([]interface{}) + if !ok { return nil } ret := make([]metav1.OwnerReference, 0, len(original)) - for i := 0; i < len(original); i++ { - ret = append(ret, extractOwnerReference(original[i])) + for _, obj := range original { + o, ok := obj.(map[string]interface{}) + if !ok { + // expected map[string]interface{}, got something else + return nil + } + ret = append(ret, extractOwnerReference(o)) } return ret } func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { - var newReferences = make([]map[string]interface{}, 0, len(references)) - for i := 0; i < len(references); i++ { - newReferences = append(newReferences, setOwnerReference(references[i])) + newReferences := make([]interface{}, 0, len(references)) + for _, reference := range references { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err)) + continue + } + newReferences = append(newReferences, out) } u.setNestedField(newReferences, "metadata", "ownerReferences") } @@ -433,7 +228,11 @@ func (u *Unstructured) SetResourceVersion(version string) { } func (u *Unstructured) GetGeneration() int64 { - return getNestedInt64(u.Object, "metadata", "generation") + val, ok := NestedInt64(u.Object, "metadata", "generation") + if !ok { + return 0 + } + return val } func (u *Unstructured) SetGeneration(generation int64) { @@ -464,6 +263,10 @@ func (u *Unstructured) GetCreationTimestamp() metav1.Time { func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { ts, _ := timestamp.MarshalQueryParameter() + if len(ts) == 0 || timestamp.Time.IsZero() { + RemoveNestedField(u.Object, "metadata", "creationTimestamp") + return + } u.setNestedField(ts, "metadata", "creationTimestamp") } @@ -478,7 +281,7 @@ func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { if timestamp == nil { - u.setNestedField(nil, "metadata", "deletionTimestamp") + RemoveNestedField(u.Object, "metadata", "deletionTimestamp") return } ts, _ := timestamp.MarshalQueryParameter() @@ -486,15 +289,24 @@ func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { } func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "deletionGracePeriodSeconds") + val, ok := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") + if !ok { + return nil + } + return &val } func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { - u.setNestedField(deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") + if deletionGracePeriodSeconds == nil { + RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds") + return + } + u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") } func (u *Unstructured) GetLabels() map[string]string { - return getNestedMap(u.Object, "metadata", "labels") + m, _ := NestedStringMap(u.Object, "metadata", "labels") + return m } func (u *Unstructured) SetLabels(labels map[string]string) { @@ -502,7 +314,8 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) 
GetAnnotations() map[string]string { - return getNestedMap(u.Object, "metadata", "annotations") + m, _ := NestedStringMap(u.Object, "metadata", "annotations") + return m } func (u *Unstructured) SetAnnotations(annotations map[string]string) { @@ -523,41 +336,34 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -var converter = unstructured.NewConverter(false) - func (u *Unstructured) GetInitializers() *metav1.Initializers { - field := getNestedField(u.Object, "metadata", "initializers") - if field == nil { - return nil - } - obj, ok := field.(map[string]interface{}) + m, ok := nestedMapNoCopy(u.Object, "metadata", "initializers") if !ok { return nil } out := &metav1.Initializers{} - if err := converter.FromUnstructured(obj, out); err != nil { + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + return nil } return out } func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } if initializers == nil { - setNestedField(u.Object, nil, "metadata", "initializers") + RemoveNestedField(u.Object, "metadata", "initializers") return } - out, err := converter.ToUnstructured(initializers) + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) } - setNestedField(u.Object, out, "metadata", "initializers") + u.setNestedField(out, "metadata", "initializers") } func (u *Unstructured) GetFinalizers() []string { - return getNestedSlice(u.Object, "metadata", "finalizers") + val, _ := NestedStringSlice(u.Object, "metadata", "finalizers") + return val } func (u *Unstructured) SetFinalizers(finalizers []string) { @@ -571,272 +377,3 @@ func (u *Unstructured) GetClusterName() string { func (u *Unstructured) SetClusterName(clusterName string) { u.setNestedField(clusterName, "metadata", "clusterName") } - -// UnstructuredList allows lists that do not have Golang structs -// registered to be manipulated generically. This can be used to deal -// with the API lists from a plug-in. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:deepcopy-gen=true -type UnstructuredList struct { - Object map[string]interface{} - - // Items is a list of unstructured objects. - Items []Unstructured `json:"items"` -} - -var _ metav1.ListInterface = &UnstructuredList{} - -// MarshalJSON ensures that the unstructured list object produces proper -// JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - err := UnstructuredJSONScheme.Encode(u, &buf) - return buf.Bytes(), err -} - -// UnmarshalJSON ensures that the unstructured list object properly -// decodes JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) UnmarshalJSON(b []byte) error { - _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) - return err -} - -func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) 
-} - -func (u *UnstructuredList) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *UnstructuredList) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *UnstructuredList) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *UnstructuredList) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *UnstructuredList) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *UnstructuredList) GetContinue() string { - return getNestedString(u.Object, "metadata", "continue") -} - -func (u *UnstructuredList) SetContinue(c string) { - u.setNestedField(c, "metadata", "continue") -} - -func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { - gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return schema.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured -// type, which can be used for generic access to objects without a predefined scheme. -// TODO: move into serializer/json. -var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} - -type unstructuredJSONScheme struct{} - -func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - var err error - if obj != nil { - err = s.decodeInto(data, obj) - } else { - obj, err = s.decode(data) - } - - if err != nil { - return nil, nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, &gvk, runtime.NewMissingKindErr(string(data)) - } - - return obj, &gvk, nil -} - -func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { - switch t := obj.(type) { - case *Unstructured: - return json.NewEncoder(w).Encode(t.Object) - case *UnstructuredList: - items := make([]map[string]interface{}, 0, len(t.Items)) - for _, i := range t.Items { - items = append(items, i.Object) - } - listObj := make(map[string]interface{}, len(t.Object)+1) - for k, v := range t.Object { // Make a shallow copy - listObj[k] = v - } - listObj["items"] = items - return json.NewEncoder(w).Encode(listObj) - case *runtime.Unknown: - // TODO: Unstructured needs to deal with ContentType. - _, err := w.Write(t.Raw) - return err - default: - return json.NewEncoder(w).Encode(t) - } -} - -func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { - type detector struct { - Items gojson.RawMessage - } - var det detector - if err := json.Unmarshal(data, &det); err != nil { - return nil, err - } - - if det.Items != nil { - list := &UnstructuredList{} - err := s.decodeToList(data, list) - return list, err - } - - // No Items field, so it wasn't a list. 
- unstruct := &Unstructured{} - err := s.decodeToUnstructured(data, unstruct) - return unstruct, err -} - -func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { - switch x := obj.(type) { - case *Unstructured: - return s.decodeToUnstructured(data, x) - case *UnstructuredList: - return s.decodeToList(data, x) - case *runtime.VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []runtime.Object{o} - } - return err - default: - return json.Unmarshal(data, x) - } -} - -func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - - unstruct.Object = m - - return nil -} - -func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { - type decodeList struct { - Items []gojson.RawMessage - } - - var dList decodeList - if err := json.Unmarshal(data, &dList); err != nil { - return err - } - - if err := json.Unmarshal(data, &list.Object); err != nil { - return err - } - - // For typed lists, e.g., a PodList, API server doesn't set each item's - // APIVersion and Kind. We need to set it. - listAPIVersion := list.GetAPIVersion() - listKind := list.GetKind() - itemKind := strings.TrimSuffix(listKind, "List") - - delete(list.Object, "items") - list.Items = nil - for _, i := range dList.Items { - unstruct := &Unstructured{} - if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { - return err - } - // This is hacky. Set the item's Kind and APIVersion to those inferred - // from the List. - if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { - unstruct.SetKind(itemKind) - unstruct.SetAPIVersion(listAPIVersion) - } - list.Items = append(list.Items, *unstruct) - } - return nil -} - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteeed to not mutate the input. Or maybe set the input - // object to nil. - unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { - if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { - gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) - if !ok { - // TODO: should this be a typed error? 
- return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) - } - in.GetObjectKind().SetGroupVersionKind(gvk) - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go new file mode 100644 index 00000000..57d78a09 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -0,0 +1,189 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.Unstructured = &UnstructuredList{} +var _ metav1.ListInterface = &UnstructuredList{} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []Unstructured `json:"items"` +} + +func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u } + +func (u *UnstructuredList) IsList() bool { return true } + +func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&u.Items[i]); err != nil { + return err + } + } + return nil +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. +func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := u.Object + if out == nil { + out = make(map[string]interface{}) + } + items := make([]interface{}, len(u.Items)) + for i, item := range u.Items { + items[i] = item.Object + } + out["items"] = items + return out +} + +// SetUnstructuredContent obeys the conventions of List and keeps Items and the items +// array in sync. If items is not an array of objects in the incoming map, then any +// mismatched item will be removed. 
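+//
+// A minimal usage sketch (hypothetical caller code, not part of this patch; the
+// literal map below is only an illustration):
+//
+//	list := &UnstructuredList{}
+//	list.SetUnstructuredContent(map[string]interface{}{
+//		"apiVersion": "v1",
+//		"kind":       "PodList",
+//		"items": []interface{}{
+//			map[string]interface{}{"apiVersion": "v1", "kind": "Pod"},
+//			"not-an-object", // mismatched entry: dropped from both Items and "items"
+//		},
+//	})
+//	// afterwards len(list.Items) == 1 and list.Object["items"] holds that single map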
+func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content + if content == nil { + obj.Items = nil + return + } + items, ok := obj.Object["items"].([]interface{}) + if !ok || items == nil { + items = []interface{}{} + } + unstructuredItems := make([]Unstructured, 0, len(items)) + newItems := make([]interface{}, 0, len(items)) + for _, item := range items { + o, ok := item.(map[string]interface{}) + if !ok { + continue + } + unstructuredItems = append(unstructuredItems, Unstructured{Object: o}) + newItems = append(newItems, o) + } + obj.Items = unstructuredItems + obj.Object["items"] = newItems +} + +func (u *UnstructuredList) DeepCopy() *UnstructuredList { + if u == nil { + return nil + } + out := new(UnstructuredList) + *out = *u + out.Object = runtime.DeepCopyJSON(u.Object) + out.Items = make([]Unstructured, len(u.Items)) + for i := range u.Items { + u.Items[i].DeepCopyInto(&out.Items[i]) + } + return out +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *UnstructuredList) SetContinue(c string) { + u.setNestedField(c, "metadata", "continue") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedField(u.Object, value, fields...) 
+} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 248acf98..8f6a17bf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package unstructured import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Unstructured).DeepCopyInto(out.(*Unstructured)) - return nil - }, InType: reflect.TypeOf(&Unstructured{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UnstructuredList).DeepCopyInto(out.(*UnstructuredList)) - return nil - }, InType: reflect.TypeOf(&UnstructuredList{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Unstructured) DeepCopyInto(out *Unstructured) { clone := in.DeepCopy() diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index c73e777b..ed458550 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -21,164 +21,10 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIGroup).DeepCopyInto(out.(*APIGroup)) - return nil - }, InType: reflect.TypeOf(&APIGroup{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIGroupList).DeepCopyInto(out.(*APIGroupList)) - return nil - }, InType: reflect.TypeOf(&APIGroupList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIResource).DeepCopyInto(out.(*APIResource)) - return nil - }, InType: reflect.TypeOf(&APIResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIResourceList).DeepCopyInto(out.(*APIResourceList)) - return nil - }, InType: reflect.TypeOf(&APIResourceList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersions).DeepCopyInto(out.(*APIVersions)) - return nil - }, InType: reflect.TypeOf(&APIVersions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Duration).DeepCopyInto(out.(*Duration)) - return nil - }, InType: reflect.TypeOf(&Duration{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExportOptions).DeepCopyInto(out.(*ExportOptions)) - return nil - }, InType: reflect.TypeOf(&ExportOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GetOptions).DeepCopyInto(out.(*GetOptions)) - return nil - }, InType: reflect.TypeOf(&GetOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupKind).DeepCopyInto(out.(*GroupKind)) - return nil - }, InType: reflect.TypeOf(&GroupKind{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResource).DeepCopyInto(out.(*GroupResource)) - return nil - }, InType: reflect.TypeOf(&GroupResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersion).DeepCopyInto(out.(*GroupVersion)) - return nil - }, InType: reflect.TypeOf(&GroupVersion{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionForDiscovery).DeepCopyInto(out.(*GroupVersionForDiscovery)) - return nil - }, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionKind).DeepCopyInto(out.(*GroupVersionKind)) - return nil - }, InType: reflect.TypeOf(&GroupVersionKind{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionResource).DeepCopyInto(out.(*GroupVersionResource)) - return nil - }, InType: reflect.TypeOf(&GroupVersionResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializers).DeepCopyInto(out.(*Initializers)) - return nil - }, InType: reflect.TypeOf(&Initializers{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InternalEvent).DeepCopyInto(out.(*InternalEvent)) - return nil - }, InType: 
reflect.TypeOf(&InternalEvent{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelSelector).DeepCopyInto(out.(*LabelSelector)) - return nil - }, InType: reflect.TypeOf(&LabelSelector{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelSelectorRequirement).DeepCopyInto(out.(*LabelSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListMeta).DeepCopyInto(out.(*ListMeta)) - return nil - }, InType: reflect.TypeOf(&ListMeta{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MicroTime).DeepCopyInto(out.(*MicroTime)) - return nil - }, InType: reflect.TypeOf(&MicroTime{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*OwnerReference).DeepCopyInto(out.(*OwnerReference)) - return nil - }, InType: reflect.TypeOf(&OwnerReference{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Patch).DeepCopyInto(out.(*Patch)) - return nil - }, InType: reflect.TypeOf(&Patch{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RootPaths).DeepCopyInto(out.(*RootPaths)) - return nil - }, InType: reflect.TypeOf(&RootPaths{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServerAddressByClientCIDR).DeepCopyInto(out.(*ServerAddressByClientCIDR)) - return nil - }, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Status).DeepCopyInto(out.(*Status)) - return nil - }, InType: reflect.TypeOf(&Status{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatusCause).DeepCopyInto(out.(*StatusCause)) - return nil - }, InType: reflect.TypeOf(&StatusCause{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatusDetails).DeepCopyInto(out.(*StatusDetails)) - return nil - }, InType: reflect.TypeOf(&StatusDetails{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Time).DeepCopyInto(out.(*Time)) - return nil - }, InType: reflect.TypeOf(&Time{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Timestamp).DeepCopyInto(out.(*Timestamp)) - return nil - }, InType: reflect.TypeOf(&Timestamp{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WatchEvent).DeepCopyInto(out.(*WatchEvent)) - return nil - }, InType: reflect.TypeOf(&WatchEvent{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *APIGroup) DeepCopyInto(out *APIGroup) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go index 043456cd..4ae545d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go @@ -21,47 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PartialObjectMetadata).DeepCopyInto(out.(*PartialObjectMetadata)) - return nil - }, InType: reflect.TypeOf(&PartialObjectMetadata{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PartialObjectMetadataList).DeepCopyInto(out.(*PartialObjectMetadataList)) - return nil - }, InType: reflect.TypeOf(&PartialObjectMetadataList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Table).DeepCopyInto(out.(*Table)) - return nil - }, InType: reflect.TypeOf(&Table{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableColumnDefinition).DeepCopyInto(out.(*TableColumnDefinition)) - return nil - }, InType: reflect.TypeOf(&TableColumnDefinition{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableOptions).DeepCopyInto(out.(*TableOptions)) - return nil - }, InType: reflect.TypeOf(&TableOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableRow).DeepCopyInto(out.(*TableRow)) - return nil - }, InType: reflect.TypeOf(&TableRow{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableRowCondition).DeepCopyInto(out.(*TableRowCondition)) - return nil - }, InType: reflect.TypeOf(&TableRowCondition{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go deleted file mode 100644 index c5dec1f3..00000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conversion - -import ( - "fmt" - "reflect" -) - -// Cloner knows how to copy one type to another. 
-type Cloner struct { - // Map from the type to a function which can do the deep copy. - deepCopyFuncs map[reflect.Type]reflect.Value - generatedDeepCopyFuncs map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error -} - -// NewCloner creates a new Cloner object. -func NewCloner() *Cloner { - c := &Cloner{ - deepCopyFuncs: map[reflect.Type]reflect.Value{}, - generatedDeepCopyFuncs: map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error{}, - } - if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil { - // If one of the deep-copy functions is malformed, detect it immediately. - panic(err) - } - return c -} - -// Prevent recursing into every byte... -func byteSliceDeepCopy(in *[]byte, out *[]byte, c *Cloner) error { - if *in != nil { - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - *out = nil - } - return nil -} - -// Verifies whether a deep-copy function has a correct signature. -func verifyDeepCopyFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1) != ft.In(0) { - return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft) - } - var forClonerType Cloner - if expected := reflect.TypeOf(&forClonerType); ft.In(2) != expected { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got: '%v'", expected, ft.In(2)) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - -// RegisterGeneratedDeepCopyFunc registers a copying func with the Cloner. -// deepCopyFunc must take three parameters: a type input, a pointer to a -// type output, and a pointer to Cloner. It should return an error. -// -// Example: -// c.RegisterGeneratedDeepCopyFunc( -// func(in Pod, out *Pod, c *Cloner) error { -// // deep copy logic... -// return nil -// }) -func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error { - fv := reflect.ValueOf(deepCopyFunc) - ft := fv.Type() - if err := verifyDeepCopyFunctionSignature(ft); err != nil { - return err - } - c.deepCopyFuncs[ft.In(0)] = fv - return nil -} - -// GeneratedDeepCopyFunc bundles an untyped generated deep-copy function of a type -// with a reflection type object used as a key to lookup the deep-copy function. -type GeneratedDeepCopyFunc struct { - Fn func(in interface{}, out interface{}, c *Cloner) error - InType reflect.Type -} - -// Similar to RegisterDeepCopyFunc, but registers deep copy function that were -// automatically generated. -func (c *Cloner) RegisterGeneratedDeepCopyFunc(fn GeneratedDeepCopyFunc) error { - c.generatedDeepCopyFuncs[fn.InType] = fn.Fn - return nil -} - -// DeepCopy will perform a deep copy of a given object. -func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) { - // Can be invalid if we run DeepCopy(X) where X is a nil interface type. - // For example, we get an invalid value when someone tries to deep-copy - // a nil labels.Selector. - // This does not occur if X is nil and is a pointer to a concrete type. 
- if in == nil { - return nil, nil - } - inValue := reflect.ValueOf(in) - outValue, err := c.deepCopy(inValue) - if err != nil { - return nil, err - } - return outValue.Interface(), nil -} - -func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) { - inType := src.Type() - - switch src.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - if src.IsNil() { - return src, nil - } - } - - if fv, ok := c.deepCopyFuncs[inType]; ok { - return c.customDeepCopy(src, fv) - } - if fv, ok := c.generatedDeepCopyFuncs[inType]; ok { - var outValue reflect.Value - outValue = reflect.New(inType.Elem()) - err := fv(src.Interface(), outValue.Interface(), c) - return outValue, err - } - return c.defaultDeepCopy(src) -} - -func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) { - outValue := reflect.New(src.Type().Elem()) - args := []reflect.Value{src, outValue, reflect.ValueOf(c)} - result := fv.Call(args)[0].Interface() - // This convolution is necessary because nil interfaces won't convert - // to error. - if result == nil { - return outValue, nil - } - return outValue, result.(error) -} - -func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) { - switch src.Kind() { - case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: - return src, fmt.Errorf("cannot deep copy kind: %s", src.Kind()) - case reflect.Array: - dst := reflect.New(src.Type()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst.Elem().Index(i).Set(copyVal) - } - return dst.Elem(), nil - case reflect.Interface: - if src.IsNil() { - return src, nil - } - return c.deepCopy(src.Elem()) - case reflect.Map: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeMap(src.Type()) - for _, k := range src.MapKeys() { - copyVal, err := c.deepCopy(src.MapIndex(k)) - if err != nil { - return src, err - } - dst.SetMapIndex(k, copyVal) - } - return dst, nil - case reflect.Ptr: - if src.IsNil() { - return src, nil - } - dst := reflect.New(src.Type().Elem()) - copyVal, err := c.deepCopy(src.Elem()) - if err != nil { - return src, err - } - dst.Elem().Set(copyVal) - return dst, nil - case reflect.Slice: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeSlice(src.Type(), 0, src.Len()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst = reflect.Append(dst, copyVal) - } - return dst, nil - case reflect.Struct: - dst := reflect.New(src.Type()) - for i := 0; i < src.NumField(); i++ { - if !dst.Elem().Field(i).CanSet() { - // Can't set private fields. At this point, the - // best we can do is a shallow copy. For - // example, time.Time is a value type with - // private members that can be shallow copied. - return src, nil - } - copyVal, err := c.deepCopy(src.Field(i)) - if err != nil { - return src, err - } - dst.Elem().Field(i).Set(copyVal) - } - return dst.Elem(), nil - - default: - // Value types like numbers, booleans, and strings. - return src, nil - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 80ba3fb7..823ef32a 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. 
package labels -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Requirement).DeepCopyInto(out.(*Requirement)) - return nil - }, InType: reflect.TypeOf(&Requirement{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Requirement) DeepCopyInto(out *Requirement) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index d9748f06..5b3080aa 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -139,6 +139,7 @@ func NewParameterCodec(scheme *Scheme) ParameterCodec { typer: scheme, convertor: scheme, creator: scheme, + defaulter: scheme, } } @@ -147,6 +148,7 @@ type parameterCodec struct { typer ObjectTyper convertor ObjectConvertor creator ObjectCreater + defaulter ObjectDefaulter } var _ ParameterCodec = ¶meterCodec{} @@ -163,9 +165,17 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro } for i := range targetGVKs { if targetGVKs[i].GroupVersion() == from { - return c.convertor.Convert(¶meters, into, nil) + if err := c.convertor.Convert(¶meters, into, nil); err != nil { + return err + } + // in the case where we going into the same object we're receiving, default on the outbound object + if c.defaulter != nil { + c.defaulter.Default(into) + } + return nil } } + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) if err != nil { return err @@ -173,6 +183,10 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro if err := c.convertor.Convert(¶meters, input, nil); err != nil { return err } + // if we have defaulter, default the input before converting to output + if c.defaulter != nil { + c.defaulter.Default(input) + } return c.convertor.Convert(input, into, nil) } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go similarity index 90% rename from vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go rename to vendor/k8s.io/apimachinery/pkg/runtime/converter.go index d0e625d2..f6f7c10d 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unstructured +package runtime import ( "bytes" @@ -27,19 +27,18 @@ import ( "strings" "sync" "sync/atomic" + "time" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "github.com/golang/glog" ) -// Converter is an interface for converting between interface{} +// UnstructuredConverter is an interface for converting between interface{} // and map[string]interface representation. 
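+//
+// (Note, grounded in the code below: DefaultUnstructuredConverter implements this
+// interface, and setting the KUBE_PATCH_CONVERSION_DETECTOR environment variable to
+// a true value makes every call additionally run a JSON round trip and compare the
+// results; that mode is meant for tests rather than production use.)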
-type Converter interface { +type UnstructuredConverter interface { ToUnstructured(obj interface{}) (map[string]interface{}, error) FromUnstructured(u map[string]interface{}, obj interface{}) error } @@ -78,7 +77,16 @@ var ( float64Type = reflect.TypeOf(float64(0)) boolType = reflect.TypeOf(bool(false)) fieldCache = newFieldsCache() - DefaultConverter = NewConverter(parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR"))) + + // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. + DefaultUnstructuredConverter = &unstructuredConverter{ + mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")), + comparison: conversion.EqualitiesOrDie( + func(a, b time.Time) bool { + return a.UTC() == b.UTC() + }, + ), + } ) func parseBool(key string) bool { @@ -92,24 +100,30 @@ func parseBool(key string) bool { return value } -// ConverterImpl knows how to convert between interface{} and +// unstructuredConverter knows how to convert between interface{} and // Unstructured in both ways. -type converterImpl struct { +type unstructuredConverter struct { // If true, we will be additionally running conversion via json // to ensure that the result is true. // This is supposed to be set only in tests. mismatchDetection bool + // comparison is the default test logic used to compare + comparison conversion.Equalities } -func NewConverter(mismatchDetection bool) Converter { - return &converterImpl{ - mismatchDetection: mismatchDetection, +// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them +// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external +// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. +func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return &unstructuredConverter{ + mismatchDetection: true, + comparison: comparison, } } // FromUnstructured converts an object from map[string]interface{} representation into a concrete type. // It uses encoding/json/Unmarshaler if object implements it or reflection if not. -func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface{}) error { +func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) if t.Kind() != reflect.Ptr || value.IsNil() { @@ -122,8 +136,8 @@ func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface if (err != nil) != (newErr != nil) { glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } - if err == nil && !apiequality.Semantic.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch for %#v, diff: %v", obj, diff.ObjectReflectDiff(obj, newObj)) + if err == nil && !c.comparison.DeepEqual(obj, newObj) { + glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -393,11 +407,12 @@ func interfaceFromUnstructured(sv, dv reflect.Value) error { // ToUnstructured converts an object into map[string]interface{} representation. // It uses encoding/json/Marshaler if object implements it or reflection if not. 
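+//
+// A hedged caller-side sketch; the Pod and ObjectMeta types are illustrative and
+// are not referenced by this package:
+//
+//	pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "example"}}
+//	u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pod)
+//	// on success, u["metadata"].(map[string]interface{})["name"] == "example"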
-func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, error) { +func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) { var u map[string]interface{} var err error - if unstr, ok := obj.(runtime.Unstructured); ok { - u = DeepCopyJSON(unstr.UnstructuredContent()) + if unstr, ok := obj.(Unstructured); ok { + // UnstructuredContent() mutates the object so we need to make a copy first + u = unstr.DeepCopyObject().(Unstructured).UnstructuredContent() } else { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) @@ -413,8 +428,8 @@ func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, if (err != nil) != (newErr != nil) { glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } - if err == nil && !apiequality.Semantic.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr)) + if err == nil && !c.comparison.DeepEqual(u, newUnstr) { + glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { @@ -426,21 +441,23 @@ func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, // DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains // types produced by json.Unmarshal(). func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { - return deepCopyJSON(x).(map[string]interface{}) + return DeepCopyJSONValue(x).(map[string]interface{}) } -func deepCopyJSON(x interface{}) interface{} { +// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal(). +func DeepCopyJSONValue(x interface{}) interface{} { switch x := x.(type) { case map[string]interface{}: clone := make(map[string]interface{}, len(x)) for k, v := range x { - clone[k] = deepCopyJSON(v) + clone[k] = DeepCopyJSONValue(v) } return clone case []interface{}: clone := make([]interface{}, len(x)) for i, v := range x { - clone[i] = deepCopyJSON(v) + clone[i] = DeepCopyJSONValue(v) } return clone case string, int64, bool, float64, nil, encodingjson.Number: diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 21a35570..86b24840 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -94,8 +94,6 @@ type missingVersionErr struct { data string } -// IsMissingVersion returns true if the error indicates that the provided object -// is missing a 'Version' field. func NewMissingVersionErr(data string) error { return &missingVersionErr{data} } @@ -104,6 +102,8 @@ func (k *missingVersionErr) Error() string { return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) } +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. func IsMissingVersion(err error) bool { if err == nil { return false diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index c90eef5a..9d00f165 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -233,13 +233,13 @@ type Object interface { // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. 
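+//
+// A rough sketch of driving this interface generically (caller code; the concrete
+// unstructured.Unstructured type lives in apis/meta/v1/unstructured and is only an
+// illustration here):
+//
+//	var obj Unstructured = &unstructured.Unstructured{Object: map[string]interface{}{
+//		"apiVersion": "v1",
+//		"kind":       "ConfigMap",
+//	}}
+//	if !obj.IsList() {
+//		content := obj.UnstructuredContent() // non-nil, mutable map
+//		content["data"] = map[string]interface{}{"key": "value"}
+//		obj.SetUnstructuredContent(content)
+//	}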
type Unstructured interface { - // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected - // to bypass conversion. - IsUnstructuredObject() + Object // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. UnstructuredContent() map[string]interface{} + // SetUnstructuredContent updates the object content to match the provided map. + SetUnstructuredContent(map[string]interface{}) // IsList returns true if this type is a list or matches the list convention - has an array called "items". IsList() bool // EachListItem should pass a single item out of the list as an Object to the provided function. Any diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index c597fcf9..08b75538 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -68,10 +68,6 @@ type Scheme struct { // converter stores all registered conversion functions. It also has // default coverting behavior. converter *conversion.Converter - - // cloner stores all registered copy functions. It also has default - // deep copy behavior. - cloner *conversion.Cloner } // Function to convert a field selector to internal representation. @@ -80,11 +76,10 @@ type FieldLabelConversionFunc func(label, value string) (internalLabel, internal // NewScheme creates a new Scheme. This scheme is pluggable by default. func NewScheme() *Scheme { s := &Scheme{ - gvkToType: map[schema.GroupVersionKind]reflect.Type{}, - typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, - unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, - unversionedKinds: map[string]reflect.Type{}, - cloner: conversion.NewCloner(), + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, } @@ -222,19 +217,22 @@ func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { return s.gvkToType } -// ObjectKind returns the group,version,kind of the go object and true if this object -// is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { - gvks, unversionedType, err := s.ObjectKinds(obj) - if err != nil { - return schema.GroupVersionKind{}, false, err - } - return gvks[0], unversionedType, nil -} - // ObjectKinds returns all possible group,version,kind of the go object, true if the // object is considered unversioned, or an error if it's not a pointer or is unregistered. 
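+//
+// For Unstructured objects the result is taken from the object's own apiVersion and
+// kind fields; a hedged illustration of what a caller might see (the Deployment GVK
+// is only an example):
+//
+//	u := &unstructured.Unstructured{}
+//	u.SetGroupVersionKind(schema.GroupVersionKind{Group: "apps", Version: "v1", Kind: "Deployment"})
+//	gvks, unversioned, err := scheme.ObjectKinds(u)
+//	// gvks == [apps/v1, Kind=Deployment], unversioned == false, err == nil
+//	// an empty kind or version instead yields a missing-kind / missing-version error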
func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + // Unstructured objects are always considered to have their declared GVK + if _, ok := obj.(Unstructured); ok { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + v, err := conversion.EnforcePtr(obj) if err != nil { return nil, false, err @@ -343,7 +341,7 @@ func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { return nil } -// Similar to AddConversionFuncs, but registers conversion functions that were +// AddGeneratedConversionFuncs registers conversion functions that were // automatically generated. func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { for _, f := range conversionFuncs { @@ -354,29 +352,6 @@ func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) err return nil } -// AddDeepCopyFuncs adds a function to the list of deep-copy functions. -// For the expected format of deep-copy function, see the comment for -// Copier.RegisterDeepCopyFunction. -func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { - for _, f := range deepCopyFuncs { - if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { - return err - } - } - return nil -} - -// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were -// automatically generated. -func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...conversion.GeneratedDeepCopyFunc) error { - for _, fn := range deepCopyFuncs { - if err := s.cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil { - return err - } - } - return nil -} - // AddFieldLabelConversionFunc adds a conversion function to convert field selectors // of the given kind from the given version to internal version representation. func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { @@ -424,10 +399,68 @@ func (s *Scheme) Default(src Object) { // testing of conversion functions. Returns an error if the conversion isn't // possible. You can call this with types that haven't been registered (for example, // a to test conversion of types that are nested within registered types). The -// context interface is passed to the convertor. -// TODO: identify whether context should be hidden, or behind a formal context/scope -// interface +// context interface is passed to the convertor. Convert also supports Unstructured +// types and will convert them intelligently. func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + unstructuredIn, okIn := in.(Unstructured) + unstructuredOut, okOut := out.(Unstructured) + switch { + case okIn && okOut: + // converting unstructured input to an unstructured output is a straight copy - unstructured + // is a "smart holder" and the contents are passed by reference between the two objects + unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent()) + return nil + + case okOut: + // if the output is an unstructured object, use the standard Go type to unstructured + // conversion. The object must not be internal. 
+ obj, ok := in.(Object) + if !ok { + return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in) + } + gvks, unversioned, err := s.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + + // if no conversion is necessary, convert immediately + if unversioned || gvk.Version != APIVersionInternal { + content, err := DefaultUnstructuredConverter.ToUnstructured(in) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + } + + // attempt to convert the object to an external version first. + target, ok := context.(GroupVersioner) + if !ok { + return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in) + } + // Convert is implicitly unsafe, so we don't need to perform a safe conversion + versioned, err := s.UnsafeConvertToVersion(obj, target) + if err != nil { + return err + } + content, err := DefaultUnstructuredConverter.ToUnstructured(versioned) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + + case okIn: + // converting an unstructured object to any type is modeled by first converting + // the input to a versioned type, then running standard conversions + typed, err := s.unstructuredToTyped(unstructuredIn) + if err != nil { + return err + } + in = typed + } + flags, meta := s.generateConvertMeta(in) meta.Context = context if flags == 0 { @@ -436,8 +469,8 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { return s.converter.Convert(in, out, flags, meta) } -// Converts the given field label and value for an kind field selector from -// versioned representation to an unversioned one. +// ConvertFieldLabel alters the given field label and value for an kind field selector from +// versioned representation to an unversioned one or returns an error. func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { if s.fieldLabelConversionFuncs[version] == nil { return DefaultMetaV1FieldSelectorConversion(label, value) @@ -467,15 +500,30 @@ func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Objec // convertToVersion handles conversion with an optional copy. func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { - // determine the incoming kinds with as few allocations as possible. - t := reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { - return nil, fmt.Errorf("only pointer types may be converted: %v", t) - } - t = t.Elem() - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + var t reflect.Type + + if u, ok := in.(Unstructured); ok { + typed, err := s.unstructuredToTyped(u) + if err != nil { + return nil, err + } + + in = typed + // unstructuredToTyped returns an Object, which must be a pointer to a struct. + t = reflect.TypeOf(in).Elem() + + } else { + // determine the incoming kinds with as few allocations as possible. 
+ t = reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } } + kinds, ok := s.typeToGVK[t] if !ok || len(kinds) == 0 { return nil, NewNotRegisteredErrForType(t) @@ -491,7 +539,6 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } return copyAndSetTargetKind(copy, in, unversionedKind) } - return nil, NewNotRegisteredErrForTarget(t, target) } @@ -529,6 +576,25 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( return out, nil } +// unstructuredToTyped attempts to transform an unstructured object to a typed +// object if possible. It will return an error if conversion is not possible, or the versioned +// Go form of the object. Note that this conversion will lose fields. +func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { + // the type must be something we recognize + gvks, _, err := s.ObjectKinds(in) + if err != nil { + return nil, err + } + typed, err := s.New(gvks[0]) + if err != nil { + return nil, err + } + if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil { + return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err) + } + return typed, nil +} + // generateConvertMeta constructs the meta value we pass to Convert. func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { return s.converter.DefaultMeta(reflect.TypeOf(in)) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index ce3d77c2..8a217f32 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -150,9 +150,10 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } if into != nil { + _, isUnstructured := into.(runtime.Unstructured) types, _, err := s.typer.ObjectKinds(into) switch { - case runtime.IsNotRegisteredError(err): + case runtime.IsNotRegisteredError(err), isUnstructured: if err := jsoniter.ConfigFastest.Unmarshal(data, into); err != nil { return nil, actual, err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index d347461a..929c67a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -20,31 +20,6 @@ limitations under the License. package runtime -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RawExtension).DeepCopyInto(out.(*RawExtension)) - return nil - }, InType: reflect.TypeOf(&RawExtension{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Unknown).DeepCopyInto(out.(*Unknown)) - return nil - }, InType: reflect.TypeOf(&Unknown{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VersionedObjects).DeepCopyInto(out.(*VersionedObjects)) - return nil - }, InType: reflect.TypeOf(&VersionedObjects{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RawExtension) DeepCopyInto(out *RawExtension) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go index ac3c1e8c..16501d5a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -29,6 +29,7 @@ var ( ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") ) func ErrNoMergeKey(m map[string]interface{}, k string) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index b544a60a..bc2a531b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -276,17 +276,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) + ip := net.ParseIP(req.URL.Hostname()) if ip == nil { return delegate(req) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 00000000..ab66d045 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 00000000..c31de15e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
+ LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 01014f9d..fd08759f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -22,9 +22,9 @@ import ( "sort" "strings" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" - forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" ) // An alternate implementation of JSON Merge Patch @@ -92,6 +92,16 @@ type MergeOptions struct { // return a patch that yields the modified document when applied to the original document, or an error // if either of the two documents is invalid. func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) 
+} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { originalMap := map[string]interface{}{} if len(original) > 0 { if err := json.Unmarshal(original, &originalMap); err != nil { @@ -106,7 +116,7 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f } } - patchMap, err := CreateTwoWayMergeMapPatch(originalMap, modifiedMap, dataStruct, fns...) + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) if err != nil { return nil, err } @@ -118,15 +128,19 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f // encoded JSONMap. // The serialized version of the map can then be passed to StrategicMergeMapPatch. func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - t, err := getTagStructType(dataStruct) + schema, err := NewPatchMetaFromStruct(dataStruct) if err != nil { return nil, err } + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { diffOptions := DiffOptions{ SetElementOrder: true, } - patchMap, err := diffMaps(original, modified, t, diffOptions) + patchMap, err := diffMaps(original, modified, schema, diffOptions) if err != nil { return nil, err } @@ -151,12 +165,9 @@ func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{ // - IFF list of primitives && merge strategy - use parallel deletion list // - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified // - Build $retainKeys directive for fields with retainKeys patch strategy -func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOptions DiffOptions) (map[string]interface{}, error) { +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { patch := map[string]interface{}{} - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } + // This will be used to build the $retainKeys directive sent in the patch retainKeysList := make([]interface{}, 0, len(modified)) @@ -198,10 +209,10 @@ func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOpt switch originalValueTyped := originalValue.(type) { case map[string]interface{}: modifiedValueTyped := modifiedValue.(map[string]interface{}) - err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) case []interface{}: modifiedValueTyped := modifiedValue.([]interface{}) - err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) default: replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) } @@ -248,8 +259,9 @@ func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, // patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue // diffOptions contains multiple options to control how we do the diff. 
func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, - t reflect.Type, diffOptions DiffOptions) error { - fieldType, fieldPatchStrategies, _, err := forkedjson.LookupPatchMetadata(t, key) + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) + if err != nil { // We couldn't look up metadata for the field // If the values are identical, this doesn't matter, no patch is needed @@ -259,7 +271,7 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in // Otherwise, return the error return err } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } @@ -271,7 +283,7 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in patch[key] = modifiedValue } default: - patchValue, err := diffMaps(originalValue, modifiedValue, fieldType, diffOptions) + patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) if err != nil { return err } @@ -290,8 +302,8 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in // patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue // diffOptions contains multiple options to control how we do the diff. func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, - t reflect.Type, diffOptions DiffOptions) error { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) if err != nil { // We couldn't look up metadata for the field // If the values are identical, this doesn't matter, no patch is needed @@ -301,7 +313,7 @@ func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, pat // Otherwise, return the error return err } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } @@ -309,7 +321,7 @@ func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, pat // Merge the 2 slices using mergePatchKey case mergeDirective: diffOptions.BuildRetainKeysDirective = retainKeys - addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, fieldType.Elem(), fieldPatchMergeKey, diffOptions) + addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) if err != nil { return err } @@ -536,7 +548,7 @@ func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind refl // another list to set the order of the list // Only list of primitives with merge strategy will generate a parallel deletion list. // These two lists should yield modified when applied to original, for lists with merge semantics. 
-func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { +func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { if len(original) == 0 { // Both slices are empty - do nothing if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { @@ -556,7 +568,7 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string kind := elementType.Kind() switch kind { case reflect.Map: - patchList, deleteList, err = diffListsOfMaps(original, modified, t, mergeKey, diffOptions) + patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) if err != nil { return nil, nil, nil, err } @@ -702,15 +714,15 @@ func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, lis // diffListsOfMaps takes a pair of lists and // returns a (recursive) strategic merge patch list contains additions and changes and // a deletion list contains deletions -func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { patch := make([]interface{}, 0, len(modified)) deletionList := make([]interface{}, 0, len(original)) - originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) if err != nil { return nil, nil, err } - modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) if err != nil { return nil, nil, err } @@ -745,7 +757,7 @@ func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey switch { case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalElement, modifiedElement, t, diffOptions) + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) if err != nil { return nil, nil, err } @@ -798,6 +810,15 @@ func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []inte // must be json encoded content. A patch can be created from an original and a modified document // by calling CreateStrategicMergePatch. 
func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { originalMap, err := handleUnmarshal(original) if err != nil { return nil, err @@ -807,7 +828,7 @@ func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte return nil, err } - result, err := StrategicMergeMapPatch(originalMap, patchMap, dataStruct) + result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) if err != nil { return nil, err } @@ -828,38 +849,35 @@ func handleUnmarshal(j []byte) (map[string]interface{}, error) { return m, nil } -// StrategicMergePatch applies a strategic merge patch. The original and patch documents +// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents // must be JSONMap. A patch can be created from an original and modified document by // calling CreateTwoWayMergeMapPatch. // Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { - t, err := getTagStructType(dataStruct) + schema, err := NewPatchMetaFromStruct(dataStruct) if err != nil { return nil, err } + + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. + + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat + } + + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { mergeOptions := MergeOptions{ MergeParallelList: true, IgnoreUnmatchedNulls: true, } - return mergeMap(original, patch, t, mergeOptions) -} - -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) - } - - t := reflect.TypeOf(dataStruct) - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) - } - - return t, nil + return mergeMap(original, patch, schema, mergeOptions) } // handleDirectiveInMergeMap handles the patch directive when merging 2 maps. @@ -1067,7 +1085,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. 
// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md -func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) error { +func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { @@ -1094,9 +1112,9 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty var ( ok bool originalFieldValue, patchFieldValue, merged []interface{} - patchStrategy, mergeKey string - patchStrategies []string - fieldType reflect.Type + patchStrategy string + patchMeta PatchMeta + subschema LookupPatchMeta ) typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) if !ok { @@ -1122,16 +1140,16 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty return mergepatch.ErrBadArgType(patchFieldValue, patchList) } } - fieldType, patchStrategies, mergeKey, err = forkedjson.LookupPatchMetadata(t, originalKey) + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey) if err != nil { return err } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchStrategies) + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } // Check for consistency between the element order list and the field it applies to - err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, mergeKey) + err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) if err != nil { return err } @@ -1144,8 +1162,8 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // list was added merged = patchFieldValue case foundOriginal && foundPatch: - merged, err = mergeSliceHandler(originalList, patchList, fieldType, - patchStrategy, mergeKey, false, mergeOptions) + merged, err = mergeSliceHandler(originalList, patchList, subschema, + patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) if err != nil { return err } @@ -1155,13 +1173,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // Split all items into patch items and server-only items and then enforce the order. var patchItems, serverOnlyItems []interface{} - if len(mergeKey) == 0 { + if len(patchMeta.GetPatchMergeKey()) == 0 { // Primitives doesn't need merge key to do partitioning. patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList) } else { // Maps need merge key to do partitioning. 
- patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, mergeKey) + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) if err != nil { return err } @@ -1175,7 +1193,7 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // normalize merged list // typedSetElementOrderList contains all the relative order in typedPatchList, // so don't need to use typedPatchList - both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, mergeKey, kind) + both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) if err != nil { return err } @@ -1237,7 +1255,7 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey // If patch contains any null field (e.g. field_1: null) that is not // present in original, then to propagate it to the end result use // mergeOptions.IgnoreUnmatchedNulls == false. -func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) (map[string]interface{}, error) { +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { if v, ok := patch[directiveMarker]; ok { return handleDirectiveInMergeMap(v, patch) } @@ -1257,7 +1275,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio // When not merging the directive, it will make sure $setElementOrder list exist only in original. // When merging the directive, it will process $setElementOrder and its patch list together. // This function will delete the merged elements from patch so they will not be reprocessed - err = mergePatchIntoOriginal(original, patch, t, mergeOptions) + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) if err != nil { return nil, err } @@ -1295,11 +1313,6 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio continue } - // If the data type is a pointer, resolve the element. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - originalType := reflect.TypeOf(original[k]) patchType := reflect.TypeOf(patchV) if originalType != patchType { @@ -1307,22 +1320,27 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio continue } // If they're both maps or lists, recurse into the value. - // First find the fieldPatchStrategy and fieldPatchMergeKey. 
- fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return nil, err - } - switch originalType.Kind() { case reflect.Map: - - original[k], err = mergeMapHandler(original[k], patchV, fieldType, patchStrategy, mergeOptions) + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) case reflect.Slice: - original[k], err = mergeSliceHandler(original[k], patchV, fieldType, patchStrategy, fieldPatchMergeKey, isDeleteList, mergeOptions) + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: original[k] = patchV } @@ -1335,7 +1353,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio // mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy and mergeOptions. -func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) if err != nil { @@ -1343,7 +1361,7 @@ func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, } if fieldPatchStrategy != replaceDirective { - return mergeMap(typedOriginal, typedPatch, fieldType, mergeOptions) + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) } else { return typedPatch, nil } @@ -1351,7 +1369,7 @@ func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, // mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. -func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, +func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) if err != nil { @@ -1359,8 +1377,7 @@ func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, } if fieldPatchStrategy == mergeDirective { - elemType := fieldType.Elem() - return mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey, mergeOptions, isDeleteList) + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil } @@ -1369,7 +1386,7 @@ func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, // Merge two slices together. Note: This may modify both the original slice and // the patch because getting a deep copy of a slice in golang is highly // non-trivial. 
-func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { if len(original) == 0 && len(patch) == 0 { return original, nil } @@ -1394,7 +1411,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s } else { if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) } original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) @@ -1402,7 +1419,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s return nil, err } - merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, elemType, mergeOptions) + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) if err != nil { return nil, err } @@ -1480,7 +1497,7 @@ func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue i // mergeSliceWithoutSpecialElements merges slices with non-special elements. // original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, elemType reflect.Type, mergeOptions MergeOptions) ([]interface{}, error) { +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { for _, v := range patch { typedV := v.(map[string]interface{}) mergeValue, ok := typedV[mergeKey] @@ -1499,7 +1516,7 @@ func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey st var mergedMaps interface{} var err error // Merge into original. - mergedMaps, err = mergeMap(originalMap, typedV, elemType, mergeOptions) + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) if err != nil { return nil, err } @@ -1532,7 +1549,7 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{ for k, v := range m { typedV, ok := v.(map[string]interface{}) if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) } valueToMatch, ok := typedV[key] @@ -1548,14 +1565,14 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{ // by key. This is needed by tests because in JSON, list order is significant, // but in Strategic Merge Patch, merge lists do not have significant order. // Sorting the lists allows for order-insensitive comparison of patched maps. -func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { var m map[string]interface{} err := json.Unmarshal(mapJSON, &m) if err != nil { - return nil, err + return nil, mergepatch.ErrBadJSONDoc } - newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) + newM, err := sortMergeListsByNameMap(m, schema) if err != nil { return nil, err } @@ -1564,7 +1581,7 @@ func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error } // Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. 
-func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { newS := map[string]interface{}{} for k, v := range s { if k == retainKeysDirective { @@ -1585,26 +1602,29 @@ func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[stri return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList } } else if k != directiveMarker { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return nil, err - } - - // If v is a map or a merge slice, recurse. - if typedV, ok := v.(map[string]interface{}); ok { - var err error - v, err = sortMergeListsByNameMap(typedV, fieldType) + // recurse for map and slice. + switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return nil, err } - } else if typedV, ok := v.([]interface{}); ok { if patchStrategy == mergeDirective { var err error - v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) if err != nil { return nil, err } @@ -1619,7 +1639,7 @@ func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[stri } // Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. -func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { if len(s) == 0 { return s, nil } @@ -1642,7 +1662,7 @@ func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey for _, elem := range s { if recurse { typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, elemType) + newElem, err := sortMergeListsByNameMap(typedElem, schema) if err != nil { return nil, err } @@ -1788,18 +1808,13 @@ func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { // objects overlap with different values in any key. All keys are required to be // strings. Since patches of the same Type have congruent keys, this is valid // for multiple patch types. This method supports strategic merge patch semantics. 
-func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { - t, err := getTagStructType(dataStruct) - if err != nil { - return true, err - } - - return mergingMapFieldsHaveConflicts(left, right, t, "", "") +func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") } func mergingMapFieldsHaveConflicts( left, right interface{}, - fieldType reflect.Type, + schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, ) (bool, error) { switch leftType := left.(type) { @@ -1830,15 +1845,14 @@ func mergingMapFieldsHaveConflicts( return false, nil } // Check the individual keys. - return mapsHaveConflicts(leftType, rightType, fieldType) + return mapsHaveConflicts(leftType, rightType, schema) case []interface{}: rightType, ok := right.([]interface{}) if !ok { return true, nil } - return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) - + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) case string, float64, bool, int, int64, nil: return !reflect.DeepEqual(left, right), nil default: @@ -1846,21 +1860,37 @@ func mergingMapFieldsHaveConflicts( } } -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { for key, leftValue := range typedLeft { if key != directiveMarker && key != retainKeysDirective { if rightValue, ok := typedRight[key]; ok { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) - if err != nil { - return true, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return true, err + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } } if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - fieldType, patchStrategy, fieldPatchMergeKey); hasConflicts { + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { return true, err } } @@ -1872,7 +1902,7 @@ func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType func slicesHaveConflicts( typedLeft, typedRight []interface{}, - fieldType reflect.Type, + schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, ) (bool, error) { elementType, err := sliceElementType(typedLeft, typedRight) @@ -1880,7 +1910,6 @@ func slicesHaveConflicts( return true, err } - valueType := fieldType.Elem() if fieldPatchStrategy == mergeDirective { // Merging lists of scalars have no conflicts by definition // So we only need to check further if the elements are maps @@ -1899,7 +1928,7 @@ func slicesHaveConflicts( return true, err } - return mapsOfMapsHaveConflicts(leftMap, 
rightMap, valueType) + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) } // Either we don't have type information, or these are non-merging lists @@ -1917,7 +1946,7 @@ func slicesHaveConflicts( // Compare the slices element by element in order // This test will fail if the slices are not sorted for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { return true, err } } @@ -1944,10 +1973,10 @@ func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]in return result, nil } -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { for key, leftValue := range typedLeft { if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); hasConflicts { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts { return true, err } } @@ -1967,7 +1996,7 @@ func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, struc // in a way that is different from how it is changed in current (e.g., deleting it, changing its // value). We also propagate values fields that do not exist in original but are explicitly // defined in modified. -func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { +func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { originalMap := map[string]interface{}{} if len(original) > 0 { if err := json.Unmarshal(original, &originalMap); err != nil { @@ -1989,11 +2018,6 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int } } - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - // The patch is the difference from current to modified without deletions, plus deletions // from original to modified. 
To find it, we compute deletions, which are the deletions from // original to modified, and delta, which is the difference from current to modified without @@ -2002,7 +2026,7 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int IgnoreDeletions: true, SetElementOrder: true, } - deltaMap, err := diffMaps(currentMap, modifiedMap, t, deltaMapDiffOptions) + deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) if err != nil { return nil, err } @@ -2010,13 +2034,13 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int SetElementOrder: true, IgnoreChangesAndAdditions: true, } - deletionsMap, err := diffMaps(originalMap, modifiedMap, t, deletionsMapDiffOptions) + deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) if err != nil { return nil, err } mergeOptions := MergeOptions{} - patchMap, err := mergeMap(deletionsMap, deltaMap, t, mergeOptions) + patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) if err != nil { return nil, err } @@ -2032,12 +2056,12 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int // then return a conflict error. if !overwrite { changeMapDiffOptions := DiffOptions{} - changedMap, err := diffMaps(originalMap, currentMap, t, changeMapDiffOptions) + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) if err != nil { return nil, err } - hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 00000000..f84d65aa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := 
extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index 322923d4..ab590e13 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package watch -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go index 006972ec..8205a4dd 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -26,17 +26,14 @@ const ( // struct field given the struct type and the JSON name of the field. // It returns field type, a slice of patch strategies, merge key and error. // TODO: fix the returned errors to be introspectable. -func LookupPatchMetadata(t reflect.Type, jsonField string) ( +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { if t.Kind() == reflect.Ptr { t = t.Elem() } - if t.Kind() == reflect.Map { - elemType = t.Elem() - return - } + if t.Kind() != reflect.Struct { - e = fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s", + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", t.Kind().String()) return } diff --git a/vendor/k8s.io/client-go/discovery/unstructured.go b/vendor/k8s.io/client-go/discovery/unstructured.go index ee72d668..fa7f2ec0 100644 --- a/vendor/k8s.io/client-go/discovery/unstructured.go +++ b/vendor/k8s.io/client-go/discovery/unstructured.go @@ -17,22 +17,28 @@ limitations under the License. package discovery import ( - "fmt" + "reflect" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for +// UnstructuredObjectTyper provides a runtime.ObjectTyper implementation for // runtime.Unstructured object based on discovery information. 
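parsePatchMetadata above reads a field's merge behaviour from the OpenAPI vendor extensions rather than from Go struct tags. A small standalone sketch of the same lookup over a hypothetical extensions map:

```
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical extensions map, as attached to a list field such as
	// PodSpec.containers in the published OpenAPI schema.
	extensions := map[string]interface{}{
		"x-kubernetes-patch-strategy":  "merge",
		"x-kubernetes-patch-merge-key": "name",
	}

	var patchStrategies []string
	if ps, ok := extensions["x-kubernetes-patch-strategy"].(string); ok {
		// A field may carry several strategies, e.g. "merge,retainKeys".
		patchStrategies = strings.Split(ps, ",")
	}
	mergeKey, _ := extensions["x-kubernetes-patch-merge-key"].(string)

	fmt.Println(patchStrategies, mergeKey) // [merge] name
}
```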
type UnstructuredObjectTyper struct { registered map[schema.GroupVersionKind]bool + typers []runtime.ObjectTyper } // NewUnstructuredObjectTyper returns a runtime.ObjectTyper for -// unstructred objects based on discovery information. -func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { - dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)} +// unstructured objects based on discovery information. It accepts a list of fallback typers +// for handling objects that are not runtime.Unstructured. It does not delegate the Recognizes +// check, only ObjectKinds. +func NewUnstructuredObjectTyper(groupResources []*APIGroupResources, typers ...runtime.ObjectTyper) *UnstructuredObjectTyper { + dot := &UnstructuredObjectTyper{ + registered: make(map[schema.GroupVersionKind]bool), + typers: typers, + } for _, group := range groupResources { for _, discoveryVersion := range group.Group.Versions { resources, ok := group.VersionedResources[discoveryVersion.Version] @@ -49,29 +55,35 @@ func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *Unstructur return dot } -// ObjectKind returns the group,version,kind of the provided object, or an error -// if the object in not runtime.Unstructured or has no group,version,kind -// information. -func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) { - if _, ok := obj.(runtime.Unstructured); !ok { - return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) - } - - return obj.GetObjectKind().GroupVersionKind(), nil -} - // ObjectKinds returns a slice of one element with the group,version,kind of the // provided object, or an error if the object is not runtime.Unstructured or // has no group,version,kind information. unversionedType will always be false // because runtime.Unstructured object should always have group,version,kind // information set. func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return nil, false, err + if _, ok := obj.(runtime.Unstructured); ok { + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + return []schema.GroupVersionKind{gvk}, false, nil } - - return []schema.GroupVersionKind{gvk}, false, nil + var lastErr error + for _, typer := range d.typers { + gvks, unversioned, err := typer.ObjectKinds(obj) + if err != nil { + lastErr = err + continue + } + return gvks, unversioned, nil + } + if lastErr == nil { + lastErr = runtime.NewNotRegisteredErrForType(reflect.TypeOf(obj)) + } + return nil, false, lastErr } // Recognizes returns true if the provided group,version,kind was in the @@ -80,16 +92,4 @@ func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { return d.registered[gvk] } -// IsUnversioned returns false always because runtime.Unstructured objects -// should always have group,version,kind information set. ok will be true if the -// object's group,version,kind is api.Registry. 
-func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return false, false - } - - return false, d.registered[gvk] -} - var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go new file mode 100644 index 00000000..74bfb601 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package admissionregistration + +import ( + v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" + v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 00000000..0f55c737 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
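The reworked UnstructuredObjectTyper above accepts fallback ObjectTypers for objects that are not runtime.Unstructured. A wiring sketch, assuming the GetAPIGroupResources helper from the same discovery package and the client-go scheme as the fallback; the kubeconfig path is illustrative:

```
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path.
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}

	// Seed the typer with the server's discovery information.
	groupResources, err := discovery.GetAPIGroupResources(dc)
	if err != nil {
		panic(err)
	}

	// scheme.Scheme is the fallback ObjectTyper used for typed (non-Unstructured)
	// objects; only ObjectKinds is delegated to it, Recognizes is not.
	typer := discovery.NewUnstructuredObjectTyper(groupResources, scheme.Scheme)

	// A typed object is now resolved by the fallback scheme rather than rejected.
	gvks, _, err := typer.ObjectKinds(&corev1.Pod{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks)
}
```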
+*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + admissionregistration_v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// InitializerConfigurationInformer provides access to a shared informer and lister for +// InitializerConfigurations. +type InitializerConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.InitializerConfigurationLister +} + +type initializerConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().InitializerConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().InitializerConfigurations().Watch(options) + }, + }, + &admissionregistration_v1alpha1.InitializerConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *initializerConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *initializerConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1alpha1.InitializerConfiguration{}, f.defaultInformer) +} + +func (f *initializerConfigurationInformer) Lister() v1alpha1.InitializerConfigurationLister { + return v1alpha1.NewInitializerConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go new file mode 100644 index 00000000..44da0479 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // InitializerConfigurations returns a InitializerConfigurationInformer. + InitializerConfigurations() InitializerConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// InitializerConfigurations returns a InitializerConfigurationInformer. 
+func (v *version) InitializerConfigurations() InitializerConfigurationInformer { + return &initializerConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go new file mode 100644 index 00000000..4f08d69a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..31a2a865 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
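These generated group and version interfaces are consumed through the shared informer factory, which threads the factory and tweakListOptions down to each informer. A usage sketch for the new admissionregistration informers, assuming an existing kubeconfig at an illustrative path:

```
package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Illustrative kubeconfig path; in-cluster config would work the same way.
	config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config")
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)

	// Group -> version -> resource, exactly as the generated interfaces chain them.
	webhooks := factory.Admissionregistration().V1beta1().ValidatingWebhookConfigurations()
	webhooks.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// react to a new ValidatingWebhookConfiguration here
		},
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
	// Caches are warm; webhooks.Lister() now serves reads from the local indexer.
}
```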
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration_v1beta1.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister { + return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..d87ab900 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. 
+type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration_v1beta1.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister { + return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/interface.go b/vendor/k8s.io/client-go/informers/apps/interface.go new file mode 100644 index 00000000..fdd32de0 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package apps + +import ( + v1 "k8s.io/client-go/informers/apps/v1" + v1beta1 "k8s.io/client-go/informers/apps/v1beta1" + v1beta2 "k8s.io/client-go/informers/apps/v1beta2" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface + // V1beta2 provides access to shared informers for resources in V1beta2. + V1beta2() v1beta2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta2 returns a new v1beta2.Interface. +func (g *group) V1beta2() v1beta2.Interface { + return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go new file mode 100644 index 00000000..a69be9c7 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. +type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister { + return v1.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go new file mode 100644 index 00000000..1c7abf7d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. 
+type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DaemonSets(namespace).Watch(options) + }, + }, + &apps_v1.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1.DaemonSetLister { + return v1.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go new file mode 100644 index 00000000..9f6beed6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
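The NewFiltered* constructors above differ from the plain ones only by the tweakListOptions hook, which mutates the ListOptions used for both List and Watch. A sketch that narrows the standalone DaemonSet informer to a label selector; the clientset, label, and resync period are illustrative:

```
package example

import (
	"time"

	meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	appsinformers "k8s.io/client-go/informers/apps/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// NewNodeAgentDaemonSetInformer narrows the standalone DaemonSet informer to a
// single (illustrative) label selector via the tweakListOptions hook.
func NewNodeAgentDaemonSetInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
	tweak := func(options *meta_v1.ListOptions) {
		// Applied to every List and Watch issued by the informer.
		options.LabelSelector = "app=node-agent"
	}
	return appsinformers.NewFilteredDaemonSetInformer(
		clientset,
		meta_v1.NamespaceAll, // all namespaces
		30*time.Second,       // resync period
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		tweak,
	)
}
```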
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. +type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().Deployments(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().Deployments(namespace).Watch(options) + }, + }, + &apps_v1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1.DeploymentLister { + return v1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1/interface.go new file mode 100644 index 00000000..6145fd6c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. +func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go new file mode 100644 index 00000000..1ac50607 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ReplicaSets(namespace).Watch(options) + }, + }, + &apps_v1.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1.ReplicaSetLister { + return v1.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go new file mode 100644 index 00000000..535790df --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1.StatefulSetLister { + return v1.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go new file mode 100644 index 00000000..1e2de416 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. +type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1beta1.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta1.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister { + return v1beta1.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go new file mode 100644 index 00000000..4d2dea57 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. 
+type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().Deployments(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().Deployments(namespace).Watch(options) + }, + }, + &apps_v1beta1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { + return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go new file mode 100644 index 00000000..3a51a1f5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. 
+type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. +func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go new file mode 100644 index 00000000..779ae2c6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1beta1.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta1.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister { + return v1beta1.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go new file mode 100644 index 00000000..a7d55ab4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. 
+type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1beta2.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister { + return v1beta2.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go new file mode 100644 index 00000000..5d328802 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. +type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().DaemonSets(namespace).Watch(options) + }, + }, + &apps_v1beta2.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister { + return v1beta2.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go new file mode 100644 index 00000000..6b6cd603 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. +type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().Deployments(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().Deployments(namespace).Watch(options) + }, + }, + &apps_v1beta2.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1beta2.DeploymentLister { + return v1beta2.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go new file mode 100644 index 00000000..59a6e73d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. 
+func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go new file mode 100644 index 00000000..988a3e4f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ReplicaSets(namespace).Watch(options) + }, + }, + &apps_v1beta2.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister { + return v1beta2.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go new file mode 100644 index 00000000..dff9c240 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1beta2.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister { + return v1beta2.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/interface.go new file mode 100644 index 00000000..63a5c0cc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/interface.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package autoscaling + +import ( + v1 "k8s.io/client-go/informers/autoscaling/v1" + v2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V2beta1 provides access to shared informers for resources in V2beta1. + V2beta1() v2beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V2beta1 returns a new v2beta1.Interface. +func (g *group) V2beta1() v2beta1.Interface { + return v2beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 00000000..7d875e73 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + autoscaling_v1 "k8s.io/api/autoscaling/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/autoscaling/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for +// HorizontalPodAutoscalers. +type HorizontalPodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.HorizontalPodAutoscalerLister +} + +type horizontalPodAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options) + }, + }, + &autoscaling_v1.HorizontalPodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscaling_v1.HorizontalPodAutoscaler{}, f.defaultInformer) +} + +func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister { + return v1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go new file mode 100644 index 00000000..5ba90701 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. + HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
+func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go new file mode 100644 index 00000000..9865f8e1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2beta1 + +import ( + autoscaling_v2beta1 "k8s.io/api/autoscaling/v2beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for +// HorizontalPodAutoscalers. +type HorizontalPodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2beta1.HorizontalPodAutoscalerLister +} + +type horizontalPodAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(options) + }, + }, + &autoscaling_v2beta1.HorizontalPodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscaling_v2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) +} + +func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister { + return v2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go new file mode 100644 index 00000000..4c9ea849 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. + HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
+func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/batch/interface.go b/vendor/k8s.io/client-go/informers/batch/interface.go new file mode 100644 index 00000000..bbaec796 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package batch + +import ( + v1 "k8s.io/client-go/informers/batch/v1" + v1beta1 "k8s.io/client-go/informers/batch/v1beta1" + v2alpha1 "k8s.io/client-go/informers/batch/v2alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface + // V2alpha1 provides access to shared informers for resources in V2alpha1. + V2alpha1() v2alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V2alpha1 returns a new v2alpha1.Interface. +func (g *group) V2alpha1() v2alpha1.Interface { + return v2alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1/interface.go new file mode 100644 index 00000000..41c08ea2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Jobs returns a JobInformer. + Jobs() JobInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Jobs returns a JobInformer. +func (v *version) Jobs() JobInformer { + return &jobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1/job.go b/vendor/k8s.io/client-go/informers/batch/v1/job.go new file mode 100644 index 00000000..8a2e5f0d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1/job.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + batch_v1 "k8s.io/api/batch/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/batch/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// JobInformer provides access to a shared informer and lister for +// Jobs. +type JobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.JobLister +} + +type jobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewJobInformer constructs a new informer for Job type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredJobInformer constructs a new informer for Job type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1().Jobs(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1().Jobs(namespace).Watch(options) + }, + }, + &batch_v1.Job{}, + resyncPeriod, + indexers, + ) +} + +func (f *jobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *jobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&batch_v1.Job{}, f.defaultInformer) +} + +func (f *jobInformer) Lister() v1.JobLister { + return v1.NewJobLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go new file mode 100644 index 00000000..4edfd415 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + batch_v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/batch/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// CronJobInformer provides access to a shared informer and lister for +// CronJobs. +type CronJobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CronJobLister +} + +type cronJobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1beta1().CronJobs(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1beta1().CronJobs(namespace).Watch(options) + }, + }, + &batch_v1beta1.CronJob{}, + resyncPeriod, + indexers, + ) +} + +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cronJobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&batch_v1beta1.CronJob{}, f.defaultInformer) +} + +func (f *cronJobInformer) Lister() v1beta1.CronJobLister { + return v1beta1.NewCronJobLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go new file mode 100644 index 00000000..0ba1935d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CronJobs returns a CronJobInformer. + CronJobs() CronJobInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CronJobs returns a CronJobInformer. 
+func (v *version) CronJobs() CronJobInformer { + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go new file mode 100644 index 00000000..03a6e6f8 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2alpha1 + +import ( + batch_v2alpha1 "k8s.io/api/batch/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v2alpha1 "k8s.io/client-go/listers/batch/v2alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// CronJobInformer provides access to a shared informer and lister for +// CronJobs. +type CronJobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CronJobLister +} + +type cronJobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV2alpha1().CronJobs(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV2alpha1().CronJobs(namespace).Watch(options) + }, + }, + &batch_v2alpha1.CronJob{}, + resyncPeriod, + indexers, + ) +} + +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cronJobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&batch_v2alpha1.CronJob{}, f.defaultInformer) +} + +func (f *cronJobInformer) Lister() v2alpha1.CronJobLister { + return v2alpha1.NewCronJobLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go b/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go new file mode 100644 index 00000000..39b6f33f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CronJobs returns a CronJobInformer. + CronJobs() CronJobInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CronJobs returns a CronJobInformer. +func (v *version) CronJobs() CronJobInformer { + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/certificates/interface.go b/vendor/k8s.io/client-go/informers/certificates/interface.go new file mode 100644 index 00000000..1eefe479 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package certificates + +import ( + v1beta1 "k8s.io/client-go/informers/certificates/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 00000000..44aac5c7 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + certificates_v1beta1 "k8s.io/api/certificates/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/certificates/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// CertificateSigningRequestInformer provides access to a shared informer and lister for +// CertificateSigningRequests. +type CertificateSigningRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CertificateSigningRequestLister +} + +type certificateSigningRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1beta1().CertificateSigningRequests().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1beta1().CertificateSigningRequests().Watch(options) + }, + }, + &certificates_v1beta1.CertificateSigningRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certificates_v1beta1.CertificateSigningRequest{}, f.defaultInformer) +} + +func (f *certificateSigningRequestInformer) Lister() v1beta1.CertificateSigningRequestLister { + return v1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go new file mode 100644 index 00000000..8578023c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CertificateSigningRequests returns a CertificateSigningRequestInformer. 
+ CertificateSigningRequests() CertificateSigningRequestInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CertificateSigningRequests returns a CertificateSigningRequestInformer. +func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer { + return &certificateSigningRequestInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/core/interface.go b/vendor/k8s.io/client-go/informers/core/interface.go new file mode 100644 index 00000000..7fc2a5cd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package core + +import ( + v1 "k8s.io/client-go/informers/core/v1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go new file mode 100644 index 00000000..77b17fd3 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ComponentStatusInformer provides access to a shared informer and lister for +// ComponentStatuses. +type ComponentStatusInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ComponentStatusLister +} + +type componentStatusInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ComponentStatuses().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ComponentStatuses().Watch(options) + }, + }, + &core_v1.ComponentStatus{}, + resyncPeriod, + indexers, + ) +} + +func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ComponentStatus{}, f.defaultInformer) +} + +func (f *componentStatusInformer) Lister() v1.ComponentStatusLister { + return v1.NewComponentStatusLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/vendor/k8s.io/client-go/informers/core/v1/configmap.go new file mode 100644 index 00000000..ed0f4c2d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ConfigMapInformer provides access to a shared informer and lister for +// ConfigMaps. +type ConfigMapInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ConfigMapLister +} + +type configMapInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ConfigMaps(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ConfigMaps(namespace).Watch(options) + }, + }, + &core_v1.ConfigMap{}, + resyncPeriod, + indexers, + ) +} + +func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *configMapInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ConfigMap{}, f.defaultInformer) +} + +func (f *configMapInformer) Lister() v1.ConfigMapLister { + return v1.NewConfigMapLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go new file mode 100644 index 00000000..8a7228ba --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EndpointsInformer provides access to a shared informer and lister for +// Endpoints. +type EndpointsInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EndpointsLister +} + +type endpointsInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Endpoints(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Endpoints(namespace).Watch(options) + }, + }, + &core_v1.Endpoints{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointsInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Endpoints{}, f.defaultInformer) +} + +func (f *endpointsInformer) Lister() v1.EndpointsLister { + return v1.NewEndpointsLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/event.go b/vendor/k8s.io/client-go/informers/core/v1/event.go new file mode 100644 index 00000000..23f5ead6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EventInformer provides access to a shared informer and lister for +// Events. +type EventInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EventLister +} + +type eventInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEventInformer constructs a new informer for Event type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Events(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Events(namespace).Watch(options) + }, + }, + &core_v1.Event{}, + resyncPeriod, + indexers, + ) +} + +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *eventInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Event{}, f.defaultInformer) +} + +func (f *eventInformer) Lister() v1.EventLister { + return v1.NewEventLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/interface.go b/vendor/k8s.io/client-go/informers/core/v1/interface.go new file mode 100644 index 00000000..e560b12f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/interface.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ComponentStatuses returns a ComponentStatusInformer. + ComponentStatuses() ComponentStatusInformer + // ConfigMaps returns a ConfigMapInformer. + ConfigMaps() ConfigMapInformer + // Endpoints returns a EndpointsInformer. + Endpoints() EndpointsInformer + // Events returns a EventInformer. + Events() EventInformer + // LimitRanges returns a LimitRangeInformer. + LimitRanges() LimitRangeInformer + // Namespaces returns a NamespaceInformer. 
+ Namespaces() NamespaceInformer + // Nodes returns a NodeInformer. + Nodes() NodeInformer + // PersistentVolumes returns a PersistentVolumeInformer. + PersistentVolumes() PersistentVolumeInformer + // PersistentVolumeClaims returns a PersistentVolumeClaimInformer. + PersistentVolumeClaims() PersistentVolumeClaimInformer + // Pods returns a PodInformer. + Pods() PodInformer + // PodTemplates returns a PodTemplateInformer. + PodTemplates() PodTemplateInformer + // ReplicationControllers returns a ReplicationControllerInformer. + ReplicationControllers() ReplicationControllerInformer + // ResourceQuotas returns a ResourceQuotaInformer. + ResourceQuotas() ResourceQuotaInformer + // Secrets returns a SecretInformer. + Secrets() SecretInformer + // Services returns a ServiceInformer. + Services() ServiceInformer + // ServiceAccounts returns a ServiceAccountInformer. + ServiceAccounts() ServiceAccountInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ComponentStatuses returns a ComponentStatusInformer. +func (v *version) ComponentStatuses() ComponentStatusInformer { + return &componentStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ConfigMaps returns a ConfigMapInformer. +func (v *version) ConfigMaps() ConfigMapInformer { + return &configMapInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Endpoints returns a EndpointsInformer. +func (v *version) Endpoints() EndpointsInformer { + return &endpointsInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Events returns a EventInformer. +func (v *version) Events() EventInformer { + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// LimitRanges returns a LimitRangeInformer. +func (v *version) LimitRanges() LimitRangeInformer { + return &limitRangeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Namespaces returns a NamespaceInformer. +func (v *version) Namespaces() NamespaceInformer { + return &namespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Nodes returns a NodeInformer. +func (v *version) Nodes() NodeInformer { + return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PersistentVolumes returns a PersistentVolumeInformer. +func (v *version) PersistentVolumes() PersistentVolumeInformer { + return &persistentVolumeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PersistentVolumeClaims returns a PersistentVolumeClaimInformer. +func (v *version) PersistentVolumeClaims() PersistentVolumeClaimInformer { + return &persistentVolumeClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Pods returns a PodInformer. +func (v *version) Pods() PodInformer { + return &podInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodTemplates returns a PodTemplateInformer. 
+func (v *version) PodTemplates() PodTemplateInformer { + return &podTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicationControllers returns a ReplicationControllerInformer. +func (v *version) ReplicationControllers() ReplicationControllerInformer { + return &replicationControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceQuotas returns a ResourceQuotaInformer. +func (v *version) ResourceQuotas() ResourceQuotaInformer { + return &resourceQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Secrets returns a SecretInformer. +func (v *version) Secrets() SecretInformer { + return &secretInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Services returns a ServiceInformer. +func (v *version) Services() ServiceInformer { + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServiceAccounts returns a ServiceAccountInformer. +func (v *version) ServiceAccounts() ServiceAccountInformer { + return &serviceAccountInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go new file mode 100644 index 00000000..9588b940 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// LimitRangeInformer provides access to a shared informer and lister for +// LimitRanges. +type LimitRangeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.LimitRangeLister +} + +type limitRangeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().LimitRanges(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().LimitRanges(namespace).Watch(options) + }, + }, + &core_v1.LimitRange{}, + resyncPeriod, + indexers, + ) +} + +func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.LimitRange{}, f.defaultInformer) +} + +func (f *limitRangeInformer) Lister() v1.LimitRangeLister { + return v1.NewLimitRangeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/vendor/k8s.io/client-go/informers/core/v1/namespace.go new file mode 100644 index 00000000..eb841b15 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// NamespaceInformer provides access to a shared informer and lister for +// Namespaces. 
+type NamespaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NamespaceLister +} + +type namespaceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Namespaces().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Namespaces().Watch(options) + }, + }, + &core_v1.Namespace{}, + resyncPeriod, + indexers, + ) +} + +func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *namespaceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Namespace{}, f.defaultInformer) +} + +func (f *namespaceInformer) Lister() v1.NamespaceLister { + return v1.NewNamespaceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/node.go b/vendor/k8s.io/client-go/informers/core/v1/node.go new file mode 100644 index 00000000..3c70e52b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// NodeInformer provides access to a shared informer and lister for +// Nodes. +type NodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NodeLister +} + +type nodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Nodes().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Nodes().Watch(options) + }, + }, + &core_v1.Node{}, + resyncPeriod, + indexers, + ) +} + +func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *nodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Node{}, f.defaultInformer) +} + +func (f *nodeInformer) Lister() v1.NodeLister { + return v1.NewNodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go new file mode 100644 index 00000000..e944560f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PersistentVolumeInformer provides access to a shared informer and lister for +// PersistentVolumes. +type PersistentVolumeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PersistentVolumeLister +} + +type persistentVolumeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumes().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumes().Watch(options) + }, + }, + &core_v1.PersistentVolume{}, + resyncPeriod, + indexers, + ) +} + +func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.PersistentVolume{}, f.defaultInformer) +} + +func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister { + return v1.NewPersistentVolumeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go new file mode 100644 index 00000000..136884d4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PersistentVolumeClaimInformer provides access to a shared informer and lister for +// PersistentVolumeClaims. +type PersistentVolumeClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PersistentVolumeClaimLister +} + +type persistentVolumeClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumeClaims(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumeClaims(namespace).Watch(options) + }, + }, + &core_v1.PersistentVolumeClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.PersistentVolumeClaim{}, f.defaultInformer) +} + +func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister { + return v1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/pod.go b/vendor/k8s.io/client-go/informers/core/v1/pod.go new file mode 100644 index 00000000..b9720829 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodInformer provides access to a shared informer and lister for +// Pods. +type PodInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodLister +} + +type podInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Pods(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Pods(namespace).Watch(options) + }, + }, + &core_v1.Pod{}, + resyncPeriod, + indexers, + ) +} + +func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Pod{}, f.defaultInformer) +} + +func (f *podInformer) Lister() v1.PodLister { + return v1.NewPodLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go new file mode 100644 index 00000000..c0575385 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodTemplateInformer provides access to a shared informer and lister for +// PodTemplates. +type PodTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodTemplateLister +} + +type podTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodTemplateInformer constructs a new informer for PodTemplate type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PodTemplates(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PodTemplates(namespace).Watch(options) + }, + }, + &core_v1.PodTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.PodTemplate{}, f.defaultInformer) +} + +func (f *podTemplateInformer) Lister() v1.PodTemplateLister { + return v1.NewPodTemplateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go new file mode 100644 index 00000000..e04cd146 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicationControllerInformer provides access to a shared informer and lister for +// ReplicationControllers. 
+type ReplicationControllerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ReplicationControllerLister +} + +type replicationControllerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ReplicationControllers(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ReplicationControllers(namespace).Watch(options) + }, + }, + &core_v1.ReplicationController{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ReplicationController{}, f.defaultInformer) +} + +func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister { + return v1.NewReplicationControllerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go new file mode 100644 index 00000000..3ef4f4c1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ResourceQuotaInformer provides access to a shared informer and lister for +// ResourceQuotas. +type ResourceQuotaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ResourceQuotaLister +} + +type resourceQuotaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ResourceQuotas(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ResourceQuotas(namespace).Watch(options) + }, + }, + &core_v1.ResourceQuota{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ResourceQuota{}, f.defaultInformer) +} + +func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister { + return v1.NewResourceQuotaLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/secret.go b/vendor/k8s.io/client-go/informers/core/v1/secret.go new file mode 100644 index 00000000..7bc6395a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// SecretInformer provides access to a shared informer and lister for +// Secrets. +type SecretInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.SecretLister +} + +type secretInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Secrets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Secrets(namespace).Watch(options) + }, + }, + &core_v1.Secret{}, + resyncPeriod, + indexers, + ) +} + +func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *secretInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Secret{}, f.defaultInformer) +} + +func (f *secretInformer) Lister() v1.SecretLister { + return v1.NewSecretLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/service.go b/vendor/k8s.io/client-go/informers/core/v1/service.go new file mode 100644 index 00000000..d1b5ed02 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ServiceInformer provides access to a shared informer and lister for +// Services. +type ServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceLister +} + +type serviceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Services(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Services(namespace).Watch(options) + }, + }, + &core_v1.Service{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Service{}, f.defaultInformer) +} + +func (f *serviceInformer) Lister() v1.ServiceLister { + return v1.NewServiceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go new file mode 100644 index 00000000..fb9c50aa --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ServiceAccountInformer provides access to a shared informer and lister for +// ServiceAccounts. +type ServiceAccountInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceAccountLister +} + +type serviceAccountInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ServiceAccounts(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ServiceAccounts(namespace).Watch(options) + }, + }, + &core_v1.ServiceAccount{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ServiceAccount{}, f.defaultInformer) +} + +func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister { + return v1.NewServiceAccountLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/events/interface.go b/vendor/k8s.io/client-go/informers/events/interface.go new file mode 100644 index 00000000..81f6646f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package events + +import ( + v1beta1 "k8s.io/client-go/informers/events/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go new file mode 100644 index 00000000..d604b4cf --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + events_v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/events/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EventInformer provides access to a shared informer and lister for +// Events. +type EventInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.EventLister +} + +type eventInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventsV1beta1().Events(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventsV1beta1().Events(namespace).Watch(options) + }, + }, + &events_v1beta1.Event{}, + resyncPeriod, + indexers, + ) +} + +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *eventInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&events_v1beta1.Event{}, f.defaultInformer) +} + +func (f *eventInformer) Lister() v1beta1.EventLister { + return v1beta1.NewEventLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go new file mode 100644 index 00000000..d079afed --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Events returns a EventInformer. + Events() EventInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Events returns a EventInformer. +func (v *version) Events() EventInformer { + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/interface.go b/vendor/k8s.io/client-go/informers/extensions/interface.go new file mode 100644 index 00000000..a6bfc3b4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package extensions + +import ( + v1beta1 "k8s.io/client-go/informers/extensions/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go new file mode 100644 index 00000000..c64b14c3 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. +type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(options) + }, + }, + &extensions_v1beta1.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1beta1.DaemonSetLister { + return v1beta1.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go new file mode 100644 index 00000000..4bcfc5c2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. 
+type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Deployments(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Deployments(namespace).Watch(options) + }, + }, + &extensions_v1beta1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { + return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go new file mode 100644 index 00000000..22dac92b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// IngressInformer provides access to a shared informer and lister for +// Ingresses. +type IngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.IngressLister +} + +type ingressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Ingresses(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Ingresses(namespace).Watch(options) + }, + }, + &extensions_v1beta1.Ingress{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.Ingress{}, f.defaultInformer) +} + +func (f *ingressInformer) Lister() v1beta1.IngressLister { + return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go new file mode 100644 index 00000000..ce060e0d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // Ingresses returns a IngressInformer. + Ingresses() IngressInformer + // PodSecurityPolicies returns a PodSecurityPolicyInformer. + PodSecurityPolicies() PodSecurityPolicyInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Ingresses returns a IngressInformer. +func (v *version) Ingresses() IngressInformer { + return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodSecurityPolicies returns a PodSecurityPolicyInformer. +func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { + return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000..18ef2735 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodSecurityPolicyInformer provides access to a shared informer and lister for +// PodSecurityPolicies. +type PodSecurityPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PodSecurityPolicyLister +} + +type podSecurityPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().PodSecurityPolicies().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(options) + }, + }, + &extensions_v1beta1.PodSecurityPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.PodSecurityPolicy{}, f.defaultInformer) +} + +func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { + return v1beta1.NewPodSecurityPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go new file mode 100644 index 00000000..856cb30b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(options) + }, + }, + &extensions_v1beta1.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1beta1.ReplicaSetLister { + return v1beta1.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go new file mode 100644 index 00000000..e922c127 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -0,0 +1,208 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package informers + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + admissionregistration "k8s.io/client-go/informers/admissionregistration" + apps "k8s.io/client-go/informers/apps" + autoscaling "k8s.io/client-go/informers/autoscaling" + batch "k8s.io/client-go/informers/batch" + certificates "k8s.io/client-go/informers/certificates" + core "k8s.io/client-go/informers/core" + events "k8s.io/client-go/informers/events" + extensions "k8s.io/client-go/informers/extensions" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + networking "k8s.io/client-go/informers/networking" + policy "k8s.io/client-go/informers/policy" + rbac "k8s.io/client-go/informers/rbac" + scheduling "k8s.io/client-go/informers/scheduling" + settings "k8s.io/client-go/informers/settings" + storage "k8s.io/client-go/informers/storage" + kubernetes "k8s.io/client-go/kubernetes" + cache "k8s.io/client-go/tools/cache" + reflect "reflect" + sync "sync" + time "time" +) + +type sharedInformerFactory struct { + client kubernetes.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory +func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +func NewFilteredSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return &sharedInformerFactory{ + client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + } +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + informer = newFunc(f.client, f.defaultResync) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Admissionregistration() admissionregistration.Interface + Apps() apps.Interface + Autoscaling() autoscaling.Interface + Batch() batch.Interface + Certificates() certificates.Interface + Core() core.Interface + Events() events.Interface + Extensions() extensions.Interface + Networking() networking.Interface + Policy() policy.Interface + Rbac() rbac.Interface + Scheduling() scheduling.Interface + Settings() settings.Interface + Storage() storage.Interface +} + +func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface { + return admissionregistration.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Apps() apps.Interface { + return apps.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { + return autoscaling.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Batch() batch.Interface { + return batch.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Certificates() certificates.Interface { + return certificates.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Core() core.Interface { + return core.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Events() events.Interface { + return events.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Extensions() extensions.Interface { + return extensions.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Networking() networking.Interface { + return networking.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Policy() policy.Interface { + return policy.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Rbac() rbac.Interface { + return rbac.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Scheduling() scheduling.Interface { + return scheduling.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Settings() settings.Interface { + return settings.New(f, 
f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Storage() storage.Interface { + return storage.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go new file mode 100644 index 00000000..70ed4331 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -0,0 +1,254 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package informers + +import ( + "fmt" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/api/apps/v1" + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1beta2 "k8s.io/api/apps/v1beta2" + autoscaling_v1 "k8s.io/api/autoscaling/v1" + v2beta1 "k8s.io/api/autoscaling/v2beta1" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + v2alpha1 "k8s.io/api/batch/v2alpha1" + certificates_v1beta1 "k8s.io/api/certificates/v1beta1" + core_v1 "k8s.io/api/core/v1" + events_v1beta1 "k8s.io/api/events/v1beta1" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + networking_v1 "k8s.io/api/networking/v1" + policy_v1beta1 "k8s.io/api/policy/v1beta1" + rbac_v1 "k8s.io/api/rbac/v1" + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + scheduling_v1alpha1 "k8s.io/api/scheduling/v1alpha1" + settings_v1alpha1 "k8s.io/api/settings/v1alpha1" + storage_v1 "k8s.io/api/storage/v1" + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" + storage_v1beta1 "k8s.io/api/storage/v1beta1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("initializerconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().InitializerConfigurations().Informer()}, nil + + // Group=admissionregistration.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil + + // Group=apps, Version=v1 + case v1.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil + + // Group=apps, Version=v1beta1 + case apps_v1beta1.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().ControllerRevisions().Informer()}, nil + case apps_v1beta1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().Deployments().Informer()}, nil + case apps_v1beta1.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().StatefulSets().Informer()}, nil + + // Group=apps, Version=v1beta2 + case v1beta2.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().ControllerRevisions().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().DaemonSets().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().Deployments().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: 
f.Apps().V1beta2().ReplicaSets().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil + + // Group=autoscaling, Version=v1 + case autoscaling_v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil + + // Group=autoscaling, Version=v2beta1 + case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil + + // Group=batch, Version=v1 + case batch_v1.SchemeGroupVersion.WithResource("jobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1().Jobs().Informer()}, nil + + // Group=batch, Version=v1beta1 + case batch_v1beta1.SchemeGroupVersion.WithResource("cronjobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1beta1().CronJobs().Informer()}, nil + + // Group=batch, Version=v2alpha1 + case v2alpha1.SchemeGroupVersion.WithResource("cronjobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V2alpha1().CronJobs().Informer()}, nil + + // Group=certificates.k8s.io, Version=v1beta1 + case certificates_v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil + + // Group=core, Version=v1 + case core_v1.SchemeGroupVersion.WithResource("componentstatuses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ComponentStatuses().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("configmaps"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ConfigMaps().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("endpoints"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Endpoints().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("events"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Events().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("limitranges"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().LimitRanges().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("namespaces"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Namespaces().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("nodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Nodes().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("persistentvolumes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PersistentVolumes().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PersistentVolumeClaims().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("pods"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Pods().Informer()}, nil + case 
core_v1.SchemeGroupVersion.WithResource("podtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PodTemplates().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("replicationcontrollers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ReplicationControllers().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("resourcequotas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ResourceQuotas().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("secrets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Secrets().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("services"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Services().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("serviceaccounts"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil + + // Group=events.k8s.io, Version=v1beta1 + case events_v1beta1.SchemeGroupVersion.WithResource("events"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil + + // Group=extensions, Version=v1beta1 + case extensions_v1beta1.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().DaemonSets().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("ingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil + + // Group=networking.k8s.io, Version=v1 + case networking_v1.SchemeGroupVersion.WithResource("networkpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil + + // Group=policy, Version=v1beta1 + case policy_v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil + + // Group=rbac.authorization.k8s.io, Version=v1 + case rbac_v1.SchemeGroupVersion.WithResource("clusterroles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoles().Informer()}, nil + case rbac_v1.SchemeGroupVersion.WithResource("clusterrolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoleBindings().Informer()}, nil + case rbac_v1.SchemeGroupVersion.WithResource("roles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().Roles().Informer()}, nil + case rbac_v1.SchemeGroupVersion.WithResource("rolebindings"): + return 
&genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().RoleBindings().Informer()}, nil + + // Group=rbac.authorization.k8s.io, Version=v1alpha1 + case rbac_v1alpha1.SchemeGroupVersion.WithResource("clusterroles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoles().Informer()}, nil + case rbac_v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoleBindings().Informer()}, nil + case rbac_v1alpha1.SchemeGroupVersion.WithResource("roles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().Roles().Informer()}, nil + case rbac_v1alpha1.SchemeGroupVersion.WithResource("rolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().RoleBindings().Informer()}, nil + + // Group=rbac.authorization.k8s.io, Version=v1beta1 + case rbac_v1beta1.SchemeGroupVersion.WithResource("clusterroles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoles().Informer()}, nil + case rbac_v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoleBindings().Informer()}, nil + case rbac_v1beta1.SchemeGroupVersion.WithResource("roles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().Roles().Informer()}, nil + case rbac_v1beta1.SchemeGroupVersion.WithResource("rolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil + + // Group=scheduling.k8s.io, Version=v1alpha1 + case scheduling_v1alpha1.SchemeGroupVersion.WithResource("priorityclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil + + // Group=settings.k8s.io, Version=v1alpha1 + case settings_v1alpha1.SchemeGroupVersion.WithResource("podpresets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil + + // Group=storage.k8s.io, Version=v1 + case storage_v1.SchemeGroupVersion.WithResource("storageclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil + + // Group=storage.k8s.io, Version=v1alpha1 + case storage_v1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil + + // Group=storage.k8s.io, Version=v1beta1 + case storage_v1beta1.SchemeGroupVersion.WithResource("storageclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000..61155f74 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package internalinterfaces + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + kubernetes "k8s.io/client-go/kubernetes" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go new file mode 100644 index 00000000..79e0d0c1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package networking + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/networking/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1/interface.go new file mode 100644 index 00000000..980a7be9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // NetworkPolicies returns a NetworkPolicyInformer. + NetworkPolicies() NetworkPolicyInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// NetworkPolicies returns a NetworkPolicyInformer. +func (v *version) NetworkPolicies() NetworkPolicyInformer { + return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go new file mode 100644 index 00000000..b712ba03 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + networking_v1 "k8s.io/api/networking/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/networking/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// NetworkPolicyInformer provides access to a shared informer and lister for +// NetworkPolicies. +type NetworkPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NetworkPolicyLister +} + +type networkPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1().NetworkPolicies(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1().NetworkPolicies(namespace).Watch(options) + }, + }, + &networking_v1.NetworkPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networking_v1.NetworkPolicy{}, f.defaultInformer) +} + +func (f *networkPolicyInformer) Lister() v1.NetworkPolicyLister { + return v1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/policy/interface.go b/vendor/k8s.io/client-go/informers/policy/interface.go new file mode 100644 index 00000000..f893c3d5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package policy + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1beta1 "k8s.io/client-go/informers/policy/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go new file mode 100644 index 00000000..f235ee1d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PodDisruptionBudgets returns a PodDisruptionBudgetInformer. + PodDisruptionBudgets() PodDisruptionBudgetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PodDisruptionBudgets returns a PodDisruptionBudgetInformer. +func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer { + return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 00000000..ba0da35b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + policy_v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/policy/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodDisruptionBudgetInformer provides access to a shared informer and lister for +// PodDisruptionBudgets. +type PodDisruptionBudgetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PodDisruptionBudgetLister +} + +type podDisruptionBudgetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(options) + }, + }, + &policy_v1beta1.PodDisruptionBudget{}, + resyncPeriod, + indexers, + ) +} + +func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&policy_v1beta1.PodDisruptionBudget{}, f.defaultInformer) +} + +func (f *podDisruptionBudgetInformer) Lister() v1beta1.PodDisruptionBudgetLister { + return v1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/interface.go b/vendor/k8s.io/client-go/informers/rbac/interface.go new file mode 100644 index 00000000..df7adfcd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package rbac + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/rbac/v1" + v1alpha1 "k8s.io/client-go/informers/rbac/v1alpha1" + v1beta1 "k8s.io/client-go/informers/rbac/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go new file mode 100644 index 00000000..ac75abbc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. 
+type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoles().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoles().Watch(options) + }, + }, + &rbac_v1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() v1.ClusterRoleLister { + return v1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go new file mode 100644 index 00000000..a3c73e58 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoleBindings().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoleBindings().Watch(options) + }, + }, + &rbac_v1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() v1.ClusterRoleBindingLister { + return v1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go new file mode 100644 index 00000000..1e46b039 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. + RoleBindings() RoleBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1/role.go new file mode 100644 index 00000000..fb1de461 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/role.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().Roles(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().Roles(namespace).Watch(options) + }, + }, + &rbac_v1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() v1.RoleLister { + return v1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go new file mode 100644 index 00000000..78c78fa1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().RoleBindings(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().RoleBindings(namespace).Watch(options) + }, + }, + &rbac_v1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() v1.RoleBindingLister { + return v1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go new file mode 100644 index 00000000..ec257965 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. +type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoles().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoles().Watch(options) + }, + }, + &rbac_v1alpha1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() v1alpha1.ClusterRoleLister { + return v1alpha1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 00000000..a2d0c396 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoleBindings().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoleBindings().Watch(options) + }, + }, + &rbac_v1alpha1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() v1alpha1.ClusterRoleBindingLister { + return v1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go new file mode 100644 index 00000000..586283d4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. 
+ RoleBindings() RoleBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go new file mode 100644 index 00000000..4564b336 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().Roles(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().Roles(namespace).Watch(options) + }, + }, + &rbac_v1alpha1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() v1alpha1.RoleLister { + return v1alpha1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go new file mode 100644 index 00000000..556f966a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().RoleBindings(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().RoleBindings(namespace).Watch(options) + }, + }, + &rbac_v1alpha1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() v1alpha1.RoleBindingLister { + return v1alpha1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go new file mode 100644 index 00000000..821746b9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. 
+type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoles().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoles().Watch(options) + }, + }, + &rbac_v1beta1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() v1beta1.ClusterRoleLister { + return v1beta1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 00000000..c517ac45 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoleBindings().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoleBindings().Watch(options) + }, + }, + &rbac_v1beta1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() v1beta1.ClusterRoleBindingLister { + return v1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go new file mode 100644 index 00000000..9d375d94 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. + RoleBindings() RoleBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go new file mode 100644 index 00000000..0f13d3aa --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().Roles(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().Roles(namespace).Watch(options) + }, + }, + &rbac_v1beta1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() v1beta1.RoleLister { + return v1beta1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go new file mode 100644 index 00000000..c951d97d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().RoleBindings(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().RoleBindings(namespace).Watch(options) + }, + }, + &rbac_v1beta1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() v1beta1.RoleBindingLister { + return v1beta1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/interface.go b/vendor/k8s.io/client-go/informers/scheduling/interface.go new file mode 100644 index 00000000..60b63e8e --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package scheduling + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/scheduling/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go new file mode 100644 index 00000000..1cceef7b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PriorityClasses returns a PriorityClassInformer. + PriorityClasses() PriorityClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PriorityClasses returns a PriorityClassInformer. +func (v *version) PriorityClasses() PriorityClassInformer { + return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go new file mode 100644 index 00000000..5c90f43d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + scheduling_v1alpha1 "k8s.io/api/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PriorityClassInformer provides access to a shared informer and lister for +// PriorityClasses. 
+type PriorityClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PriorityClassLister +} + +type priorityClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().PriorityClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().PriorityClasses().Watch(options) + }, + }, + &scheduling_v1alpha1.PriorityClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&scheduling_v1alpha1.PriorityClass{}, f.defaultInformer) +} + +func (f *priorityClassInformer) Lister() v1alpha1.PriorityClassLister { + return v1alpha1.NewPriorityClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/settings/interface.go b/vendor/k8s.io/client-go/informers/settings/interface.go new file mode 100644 index 00000000..53bc6621 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/settings/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package settings + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/settings/v1alpha1" +) + +// Interface provides access to each of this group's versions. 
+type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go new file mode 100644 index 00000000..39007ebe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PodPresets returns a PodPresetInformer. + PodPresets() PodPresetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PodPresets returns a PodPresetInformer. +func (v *version) PodPresets() PodPresetInformer { + return &podPresetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go new file mode 100644 index 00000000..2e630c73 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + settings_v1alpha1 "k8s.io/api/settings/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/settings/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodPresetInformer provides access to a shared informer and lister for +// PodPresets. +type PodPresetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PodPresetLister +} + +type podPresetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodPresetInformer constructs a new informer for PodPreset type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodPresetInformer constructs a new informer for PodPreset type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SettingsV1alpha1().PodPresets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SettingsV1alpha1().PodPresets(namespace).Watch(options) + }, + }, + &settings_v1alpha1.PodPreset{}, + resyncPeriod, + indexers, + ) +} + +func (f *podPresetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podPresetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&settings_v1alpha1.PodPreset{}, f.defaultInformer) +} + +func (f *podPresetInformer) Lister() v1alpha1.PodPresetLister { + return v1alpha1.NewPodPresetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/interface.go b/vendor/k8s.io/client-go/informers/storage/interface.go new file mode 100644 index 00000000..b91613a9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package storage + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/storage/v1" + v1alpha1 "k8s.io/client-go/informers/storage/v1alpha1" + v1beta1 "k8s.io/client-go/informers/storage/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go new file mode 100644 index 00000000..fadb1a07 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // StorageClasses returns a StorageClassInformer. + StorageClasses() StorageClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// StorageClasses returns a StorageClassInformer. +func (v *version) StorageClasses() StorageClassInformer { + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go new file mode 100644 index 00000000..341549f0 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + storage_v1 "k8s.io/api/storage/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StorageClassInformer provides access to a shared informer and lister for +// StorageClasses. +type StorageClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.StorageClassLister +} + +type storageClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().StorageClasses().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().StorageClasses().Watch(options) + }, + }, + &storage_v1.StorageClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1.StorageClass{}, f.defaultInformer) +} + +func (f *storageClassInformer) Lister() v1.StorageClassLister { + return v1.NewStorageClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go new file mode 100644 index 00000000..d84eb5fd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// VolumeAttachments returns a VolumeAttachmentInformer. 
+func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go new file mode 100644 index 00000000..cab9ffc4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttachments().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttachments().Watch(options) + }, + }, + &storage_v1alpha1.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1alpha1.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() v1alpha1.VolumeAttachmentLister { + return v1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go new file mode 100644 index 00000000..7fa1abf5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // StorageClasses returns a StorageClassInformer. + StorageClasses() StorageClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// StorageClasses returns a StorageClassInformer. 
+func (v *version) StorageClasses() StorageClassInformer { + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go new file mode 100644 index 00000000..3e96b282 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + storage_v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StorageClassInformer provides access to a shared informer and lister for +// StorageClasses. +type StorageClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.StorageClassLister +} + +type storageClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().StorageClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().StorageClasses().Watch(options) + }, + }, + &storage_v1beta1.StorageClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1beta1.StorageClass{}, f.defaultInformer) +} + +func (f *storageClassInformer) Lister() v1beta1.StorageClassLister { + return v1beta1.NewStorageClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 310eb61b..7dcf86a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,6 +20,7 @@ import ( glog "github.com/golang/glog" discovery "k8s.io/client-go/discovery" admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" @@ -34,6 +35,7 @@ import ( batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" @@ -43,6 +45,7 @@ import ( schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -51,8 +54,9 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface // Deprecated: please explicitly pick a version if possible. 
- Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface AppsV1() appsv1.AppsV1Interface @@ -81,6 +85,9 @@ type Interface interface { CoreV1() corev1.CoreV1Interface // Deprecated: please explicitly pick a version if possible. Core() corev1.CoreV1Interface + EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface // Deprecated: please explicitly pick a version if possible. Extensions() extensionsv1beta1.ExtensionsV1beta1Interface @@ -105,6 +112,7 @@ type Interface interface { StorageV1() storagev1.StorageV1Interface // Deprecated: please explicitly pick a version if possible. Storage() storagev1.StorageV1Interface + StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -112,6 +120,7 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client appsV1 *appsv1.AppsV1Client @@ -126,6 +135,7 @@ type Clientset struct { batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client @@ -136,6 +146,7 @@ type Clientset struct { settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } // AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client @@ -143,10 +154,15 @@ func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha return c.admissionregistrationV1alpha1 } +// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client +func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 +} + // Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. // Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return c.admissionregistrationV1alpha1 +func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -261,6 +277,17 @@ func (c *Clientset) Core() corev1.CoreV1Interface { return c.coreV1 } +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + +// Deprecated: Events retrieves the default version of EventsClient. +// Please explicitly pick a version. 
+func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 @@ -353,6 +380,11 @@ func (c *Clientset) Storage() storagev1.StorageV1Interface { return c.storageV1 } +// StorageV1alpha1 retrieves the StorageV1alpha1Client +func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { + return c.storageV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -373,6 +405,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -429,6 +465,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -469,6 +509,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -483,6 +527,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) @@ -497,6 +542,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) @@ -507,6 +553,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) + cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -516,6 +563,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) cs.appsV1 = appsv1.New(c) @@ -530,6 +578,7 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coreV1 = corev1.New(c) + cs.eventsV1beta1 = 
eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) @@ -540,6 +589,7 @@ func New(c rest.Interface) *Clientset { cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) + cs.storageV1alpha1 = storagev1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 984b8bc6..7bfd3361 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -18,6 +18,7 @@ package scheme import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -32,6 +33,7 @@ import ( batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" corev1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -41,6 +43,7 @@ import ( schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -73,6 +76,7 @@ func init() { // correctly. func AddToScheme(scheme *runtime.Scheme) { admissionregistrationv1alpha1.AddToScheme(scheme) + admissionregistrationv1beta1.AddToScheme(scheme) appsv1beta1.AddToScheme(scheme) appsv1beta2.AddToScheme(scheme) appsv1.AddToScheme(scheme) @@ -87,6 +91,7 @@ func AddToScheme(scheme *runtime.Scheme) { batchv2alpha1.AddToScheme(scheme) certificatesv1beta1.AddToScheme(scheme) corev1.AddToScheme(scheme) + eventsv1beta1.AddToScheme(scheme) extensionsv1beta1.AddToScheme(scheme) networkingv1.AddToScheme(scheme) policyv1beta1.AddToScheme(scheme) @@ -97,5 +102,6 @@ func AddToScheme(scheme *runtime.Scheme) { settingsv1alpha1.AddToScheme(scheme) storagev1beta1.AddToScheme(scheme) storagev1.AddToScheme(scheme) + storagev1alpha1.AddToScheme(scheme) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index 95d53391..5150fee3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -25,7 +25,6 @@ import ( type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface - ExternalAdmissionHookConfigurationsGetter InitializerConfigurationsGetter } @@ -34,10 +33,6 @@ type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *AdmissionregistrationV1alpha1Client) ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface { - return newExternalAdmissionHookConfigurations(c) -} - func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { return newInitializerConfigurations(c) 
} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go deleted file mode 100644 index 1ddc6eb4..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ExternalAdmissionHookConfigurationsGetter has a method to return a ExternalAdmissionHookConfigurationInterface. -// A group's client should implement this interface. -type ExternalAdmissionHookConfigurationsGetter interface { - ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface -} - -// ExternalAdmissionHookConfigurationInterface has methods to work with ExternalAdmissionHookConfiguration resources. -type ExternalAdmissionHookConfigurationInterface interface { - Create(*v1alpha1.ExternalAdmissionHookConfiguration) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - Update(*v1alpha1.ExternalAdmissionHookConfiguration) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.ExternalAdmissionHookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) - ExternalAdmissionHookConfigurationExpansion -} - -// externalAdmissionHookConfigurations implements ExternalAdmissionHookConfigurationInterface -type externalAdmissionHookConfigurations struct { - client rest.Interface -} - -// newExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurations -func newExternalAdmissionHookConfigurations(c *AdmissionregistrationV1alpha1Client) *externalAdmissionHookConfigurations { - return &externalAdmissionHookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the externalAdmissionHookConfiguration, and returns the corresponding externalAdmissionHookConfiguration object, and an error if there is any. -func (c *externalAdmissionHookConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ExternalAdmissionHookConfigurations that match those selectors. -func (c *externalAdmissionHookConfigurations) List(opts v1.ListOptions) (result *v1alpha1.ExternalAdmissionHookConfigurationList, err error) { - result = &v1alpha1.ExternalAdmissionHookConfigurationList{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested externalAdmissionHookConfigurations. -func (c *externalAdmissionHookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a externalAdmissionHookConfiguration and creates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Create(externalAdmissionHookConfiguration *v1alpha1.ExternalAdmissionHookConfiguration) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Post(). - Resource("externaladmissionhookconfigurations"). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a externalAdmissionHookConfiguration and updates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Update(externalAdmissionHookConfiguration *v1alpha1.ExternalAdmissionHookConfiguration) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Put(). - Resource("externaladmissionhookconfigurations"). - Name(externalAdmissionHookConfiguration.Name). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the externalAdmissionHookConfiguration and deletes it. Returns an error if one occurs. -func (c *externalAdmissionHookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *externalAdmissionHookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched externalAdmissionHookConfiguration. -func (c *externalAdmissionHookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Patch(pt). - Resource("externaladmissionhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index eef439ab..ccdfb43f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -16,6 +16,4 @@ limitations under the License. package v1alpha1 -type ExternalAdmissionHookConfigurationExpansion interface{} - type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go new file mode 100644 index 00000000..8d3774b4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1beta1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *AdmissionregistrationV1beta1Client { + return &AdmissionregistrationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go new file mode 100644 index 00000000..1b50aa19 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go new file mode 100644 index 00000000..012a8da7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..36711a50 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. +type MutatingWebhookConfigurationInterface interface { + Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + result = &v1beta1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). 
+ Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..d6684922 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. +type ValidatingWebhookConfigurationInterface interface { + Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + result = &v1beta1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
+func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 20a09cd4..07936304 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -25,7 +25,11 @@ import ( type AppsV1Interface interface { RESTClient() rest.Interface + ControllerRevisionsGetter DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + StatefulSetsGetter } // AppsV1Client is used to interact with features provided by the apps group. @@ -33,10 +37,26 @@ type AppsV1Client struct { restClient rest.Interface } +func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface { return newDaemonSets(c, namespace) } +func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + // NewForConfig creates a new AppsV1Client for the given config. 
func NewForConfig(c *rest.Config) (*AppsV1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go new file mode 100644 index 00000000..1d9f8313 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ControllerRevision, error) + List(opts meta_v1.ListOptions) (*v1.ControllerRevisionList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options meta_v1.GetOptions) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts meta_v1.ListOptions) (result *v1.ControllerRevisionList, err error) { + result = &v1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go new file mode 100644 index 00000000..34c06c8d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1.Deployment) (*v1.Deployment, error) + Update(*v1.Deployment) (*v1.Deployment, error) + UpdateStatus(*v1.Deployment) (*v1.Deployment, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Deployment, error) + List(opts meta_v1.ListOptions) (*v1.DeploymentList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options meta_v1.GetOptions) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts meta_v1.ListOptions) (result *v1.DeploymentList, err error) { + result = &v1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go index face4d0b..500d67dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go @@ -16,4 +16,12 @@ limitations under the License. package v1 +type ControllerRevisionExpansion interface{} + type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go new file mode 100644 index 00000000..5047b0c5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. 
+type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) + UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicaSet, error) + List(opts meta_v1.ListOptions) (*v1.ReplicaSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts meta_v1.ListOptions) (result *v1.ReplicaSetList, err error) { + result = &v1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go new file mode 100644 index 00000000..2c927ac0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. 
+type StatefulSetInterface interface { + Create(*v1.StatefulSet) (*v1.StatefulSet, error) + Update(*v1.StatefulSet) (*v1.StatefulSet, error) + UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StatefulSet, error) + List(opts meta_v1.ListOptions) (*v1.StatefulSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options meta_v1.GetOptions) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts meta_v1.ListOptions) (result *v1.StatefulSetList, err error) { + result = &v1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. 
+func (c *statefulSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go new file mode 100644 index 00000000..1b50aa19 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go new file mode 100644 index 00000000..3473e99c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. 
+type EventInterface interface { + Create(*v1beta1.Event) (*v1beta1.Event, error) + Update(*v1beta1.Event) (*v1beta1.Event, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Event, error) + List(opts v1.ListOptions) (*v1beta1.EventList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *EventsV1beta1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + result = &v1beta1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go new file mode 100644 index 00000000..05cee7fb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type EventsV1beta1Interface interface { + RESTClient() rest.Interface + EventsGetter +} + +// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group. +type EventsV1beta1Client struct { + restClient rest.Interface +} + +func (c *EventsV1beta1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +// NewForConfig creates a new EventsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &EventsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new EventsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *EventsV1beta1Client { + return &EventsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *EventsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go new file mode 100644 index 00000000..82b2fb4a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index c1798f67..b4f8886a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -31,7 +31,6 @@ type ExtensionsV1beta1Interface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter - ThirdPartyResourcesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -63,10 +62,6 @@ func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index d0a3d64b..e693fe68 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -23,5 +23,3 @@ type IngressExpansion interface{} type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} - -type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go deleted file mode 100644 index 28fb6dcb..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. 
-type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. -type ThirdPartyResourceInterface interface { - Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) - List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client rest.Interface -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { - return &thirdPartyResources{ - client: c.RESTClient(), - } -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. -func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { - result = &v1beta1.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. 
-func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Patch(pt). - Resource("thirdpartyresources"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go new file mode 100644 index 00000000..cdaaf620 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..afa636a2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go new file mode 100644 index 00000000..3e8c70bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeAttachmentsGetter +} + +// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1alpha1Client { + return &StorageV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go new file mode 100644 index 00000000..08bdfb25 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + result = &v1alpha1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. 
+func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..fb3b0098 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// InitializerConfigurationListerExpansion allows custom methods to be added to +// InitializerConfigurationLister. +type InitializerConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 00000000..60b004ef --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// InitializerConfigurationLister helps list InitializerConfigurations. +type InitializerConfigurationLister interface { + // List lists all InitializerConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) + // Get retrieves the InitializerConfiguration from the index for a given name. + Get(name string) (*v1alpha1.InitializerConfiguration, error) + InitializerConfigurationListerExpansion +} + +// initializerConfigurationLister implements the InitializerConfigurationLister interface. +type initializerConfigurationLister struct { + indexer cache.Indexer +} + +// NewInitializerConfigurationLister returns a new InitializerConfigurationLister. +func NewInitializerConfigurationLister(indexer cache.Indexer) InitializerConfigurationLister { + return &initializerConfigurationLister{indexer: indexer} +} + +// List lists all InitializerConfigurations in the indexer. +func (s *initializerConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.InitializerConfiguration)) + }) + return ret, err +} + +// Get retrieves the InitializerConfiguration from the index for a given name. +func (s *initializerConfigurationLister) Get(name string) (*v1alpha1.InitializerConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("initializerconfiguration"), name) + } + return obj.(*v1alpha1.InitializerConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go new file mode 100644 index 00000000..c9bf0fa5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..753dd185 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
+func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*v1beta1.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..6cb6067a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. +func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
+func (s *validatingWebhookConfigurationLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go new file mode 100644 index 00000000..c05d14c2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. 
+type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. +func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) + } + return obj.(*v1.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go new file mode 100644 index 00000000..307f8bc7 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. +type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. 
+ Get(name string) (*v1.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. +func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("daemonset"), name) + } + return obj.(*v1.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go new file mode 100644 index 00000000..83435561 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. +type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) { + var selector labels.Selector + var daemonSet *apps.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go new file mode 100644 index 00000000..36af9009 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. 
+func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("deployment"), name) + } + return obj.(*v1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go new file mode 100644 index 00000000..7802eca5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. 
+func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*apps.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go new file mode 100644 index 00000000..48917c2c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go new file mode 100644 index 00000000..7e316d6b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. 
+type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. +func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicaset"), name) + } + return obj.(*v1.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go new file mode 100644 index 00000000..675e615a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go new file mode 100644 index 00000000..fe584038 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. 
+func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. +func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("statefulset"), name) + } + return obj.(*v1.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go new file mode 100644 index 00000000..b4912976 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. 
+func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go new file mode 100644 index 00000000..f3c85bfa --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. 
+func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1beta1.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. +type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. +func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta1.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("controllerrevision"), name) + } + return obj.(*v1beta1.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go new file mode 100644 index 00000000..f59f3a96 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. 
+func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) + } + return obj.(*v1beta1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go new file mode 100644 index 00000000..441ceecd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface{} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. 
+type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go new file mode 100644 index 00000000..ec9a419a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. 
+func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) + } + return obj.(*v1beta1.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go new file mode 100644 index 00000000..f10ef731 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. +func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1beta1.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. 
+func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1beta1.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("statefulset"), name) + } + return obj.(*v1beta1.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go new file mode 100644 index 00000000..0741792a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. +func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go new file mode 100644 index 00000000..f9f1ef06 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1beta2.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. 
+type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. +func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta2.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("controllerrevision"), name) + } + return obj.(*v1beta2.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go new file mode 100644 index 00000000..cbdb13ef --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. +type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. 
+ Get(name string) (*v1beta2.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. +func (s daemonSetNamespaceLister) Get(name string) (*v1beta2.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("daemonset"), name) + } + return obj.(*v1beta2.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go new file mode 100644 index 00000000..3b01aaa4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. +type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) { + var selector labels.Selector + var daemonSet *apps.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go new file mode 100644 index 00000000..0778a9fd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. 
+func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1beta2.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1beta2.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("deployment"), name) + } + return obj.(*v1beta2.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go new file mode 100644 index 00000000..1537167a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. 
+func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*apps.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go new file mode 100644 index 00000000..6db63d4b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go new file mode 100644 index 00000000..f76e2eeb --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. +type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1beta2.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. +func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1beta2.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("replicaset"), name) + } + return obj.(*v1beta2.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go new file mode 100644 index 00000000..7562fe99 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go new file mode 100644 index 00000000..11cb3e19 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta2.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. 
+func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta2.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. +func (s scaleNamespaceLister) Get(name string) (*v1beta2.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("scale"), name) + } + return obj.(*v1beta2.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go new file mode 100644 index 00000000..13ef28f8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. 
+func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1beta2.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. +func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1beta2.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("statefulset"), name) + } + return obj.(*v1beta2.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go new file mode 100644 index 00000000..6fa6b914 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. 
+func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go new file mode 100644 index 00000000..f7b00603 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerLister. +type HorizontalPodAutoscalerListerExpansion interface{} + +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerNamespaceLister. +type HorizontalPodAutoscalerNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 00000000..48012203 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. +type HorizontalPodAutoscalerLister interface { + // List lists all HorizontalPodAutoscalers in the indexer. + List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister + HorizontalPodAutoscalerListerExpansion +} + +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. +type horizontalPodAutoscalerLister struct { + indexer cache.Indexer +} + +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { + return &horizontalPodAutoscalerLister{indexer: indexer} +} + +// List lists all HorizontalPodAutoscalers in the indexer. +func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { + return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. +type HorizontalPodAutoscalerNamespaceLister interface { + // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. + Get(name string) (*v1.HorizontalPodAutoscaler, error) + HorizontalPodAutoscalerNamespaceListerExpansion +} + +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister +// interface. +type horizontalPodAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. +func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
+func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v1.HorizontalPodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("horizontalpodautoscaler"), name) + } + return obj.(*v1.HorizontalPodAutoscaler), nil +} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go new file mode 100644 index 00000000..9e84ef13 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2beta1 + +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerLister. +type HorizontalPodAutoscalerListerExpansion interface{} + +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerNamespaceLister. +type HorizontalPodAutoscalerNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go new file mode 100644 index 00000000..c8fbdecd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2beta1 + +import ( + v2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. +type HorizontalPodAutoscalerLister interface { + // List lists all HorizontalPodAutoscalers in the indexer. + List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister + HorizontalPodAutoscalerListerExpansion +} + +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. 
+type horizontalPodAutoscalerLister struct { + indexer cache.Indexer +} + +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { + return &horizontalPodAutoscalerLister{indexer: indexer} +} + +// List lists all HorizontalPodAutoscalers in the indexer. +func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { + return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. +type HorizontalPodAutoscalerNamespaceLister interface { + // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. + Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) + HorizontalPodAutoscalerNamespaceListerExpansion +} + +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister +// interface. +type horizontalPodAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. +func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. +func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2beta1.Resource("horizontalpodautoscaler"), name) + } + return obj.(*v2beta1.HorizontalPodAutoscaler), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go new file mode 100644 index 00000000..38b7e272 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job.go b/vendor/k8s.io/client-go/listers/batch/v1/job.go new file mode 100644 index 00000000..89280d9f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1/job.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// JobLister helps list Jobs. +type JobLister interface { + // List lists all Jobs in the indexer. + List(selector labels.Selector) (ret []*v1.Job, err error) + // Jobs returns an object that can list and get Jobs. + Jobs(namespace string) JobNamespaceLister + JobListerExpansion +} + +// jobLister implements the JobLister interface. +type jobLister struct { + indexer cache.Indexer +} + +// NewJobLister returns a new JobLister. +func NewJobLister(indexer cache.Indexer) JobLister { + return &jobLister{indexer: indexer} +} + +// List lists all Jobs in the indexer. +func (s *jobLister) List(selector labels.Selector) (ret []*v1.Job, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Job)) + }) + return ret, err +} + +// Jobs returns an object that can list and get Jobs. +func (s *jobLister) Jobs(namespace string) JobNamespaceLister { + return jobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// JobNamespaceLister helps list and get Jobs. +type JobNamespaceLister interface { + // List lists all Jobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Job, err error) + // Get retrieves the Job from the indexer for a given namespace and name. + Get(name string) (*v1.Job, error) + JobNamespaceListerExpansion +} + +// jobNamespaceLister implements the JobNamespaceLister +// interface. +type jobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Jobs in the indexer for a given namespace. +func (s jobNamespaceLister) List(selector labels.Selector) (ret []*v1.Job, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Job)) + }) + return ret, err +} + +// Get retrieves the Job from the indexer for a given namespace and name. 
+func (s jobNamespaceLister) Get(name string) (*v1.Job, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("job"), name) + } + return obj.(*v1.Job), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go b/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go new file mode 100644 index 00000000..fdcd5f32 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go @@ -0,0 +1,68 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + batch "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// JobListerExpansion allows custom methods to be added to +// JobLister. +type JobListerExpansion interface { + // GetPodJobs returns a list of Jobs that potentially + // match a Pod. Only the one specified in the Pod's ControllerRef + // will actually manage it. + // Returns an error only if no matching Jobs are found. + GetPodJobs(pod *v1.Pod) (jobs []batch.Job, err error) +} + +// GetPodJobs returns a list of Jobs that potentially +// match a Pod. Only the one specified in the Pod's ControllerRef +// will actually manage it. +// Returns an error only if no matching Jobs are found. +func (l *jobLister) GetPodJobs(pod *v1.Pod) (jobs []batch.Job, err error) { + if len(pod.Labels) == 0 { + err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name) + return + } + + var list []*batch.Job + list, err = l.Jobs(pod.Namespace).List(labels.Everything()) + if err != nil { + return + } + for _, job := range list { + selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector) + if !selector.Matches(labels.Set(pod.Labels)) { + continue + } + jobs = append(jobs, *job) + } + if len(jobs) == 0 { + err = fmt.Errorf("could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// JobNamespaceListerExpansion allows custom methods to be added to +// JobNamespaceLister. +type JobNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go new file mode 100644 index 00000000..a8fa51ec --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CronJobLister helps list CronJobs. +type CronJobLister interface { + // List lists all CronJobs in the indexer. + List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + // CronJobs returns an object that can list and get CronJobs. + CronJobs(namespace string) CronJobNamespaceLister + CronJobListerExpansion +} + +// cronJobLister implements the CronJobLister interface. +type cronJobLister struct { + indexer cache.Indexer +} + +// NewCronJobLister returns a new CronJobLister. +func NewCronJobLister(indexer cache.Indexer) CronJobLister { + return &cronJobLister{indexer: indexer} +} + +// List lists all CronJobs in the indexer. +func (s *cronJobLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CronJob)) + }) + return ret, err +} + +// CronJobs returns an object that can list and get CronJobs. +func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { + return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CronJobNamespaceLister helps list and get CronJobs. +type CronJobNamespaceLister interface { + // List lists all CronJobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + // Get retrieves the CronJob from the indexer for a given namespace and name. + Get(name string) (*v1beta1.CronJob, error) + CronJobNamespaceListerExpansion +} + +// cronJobNamespaceLister implements the CronJobNamespaceLister +// interface. +type cronJobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CronJobs in the indexer for a given namespace. +func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CronJob)) + }) + return ret, err +} + +// Get retrieves the CronJob from the indexer for a given namespace and name. +func (s cronJobNamespaceLister) Get(name string) (*v1beta1.CronJob, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("cronjob"), name) + } + return obj.(*v1beta1.CronJob), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go new file mode 100644 index 00000000..3d84d249 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// CronJobListerExpansion allows custom methods to be added to +// CronJobLister. +type CronJobListerExpansion interface{} + +// CronJobNamespaceListerExpansion allows custom methods to be added to +// CronJobNamespaceLister. +type CronJobNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go new file mode 100644 index 00000000..51f5eef5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2alpha1 + +import ( + v2alpha1 "k8s.io/api/batch/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CronJobLister helps list CronJobs. +type CronJobLister interface { + // List lists all CronJobs in the indexer. + List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) + // CronJobs returns an object that can list and get CronJobs. + CronJobs(namespace string) CronJobNamespaceLister + CronJobListerExpansion +} + +// cronJobLister implements the CronJobLister interface. +type cronJobLister struct { + indexer cache.Indexer +} + +// NewCronJobLister returns a new CronJobLister. +func NewCronJobLister(indexer cache.Indexer) CronJobLister { + return &cronJobLister{indexer: indexer} +} + +// List lists all CronJobs in the indexer. +func (s *cronJobLister) List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CronJob)) + }) + return ret, err +} + +// CronJobs returns an object that can list and get CronJobs. +func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { + return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CronJobNamespaceLister helps list and get CronJobs. +type CronJobNamespaceLister interface { + // List lists all CronJobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) + // Get retrieves the CronJob from the indexer for a given namespace and name. + Get(name string) (*v2alpha1.CronJob, error) + CronJobNamespaceListerExpansion +} + +// cronJobNamespaceLister implements the CronJobNamespaceLister +// interface. +type cronJobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CronJobs in the indexer for a given namespace. 
+func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CronJob)) + }) + return ret, err +} + +// Get retrieves the CronJob from the indexer for a given namespace and name. +func (s cronJobNamespaceLister) Get(name string) (*v2alpha1.CronJob, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("cronjob"), name) + } + return obj.(*v2alpha1.CronJob), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go new file mode 100644 index 00000000..38ac70cd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2alpha1 + +// CronJobListerExpansion allows custom methods to be added to +// CronJobLister. +type CronJobListerExpansion interface{} + +// CronJobNamespaceListerExpansion allows custom methods to be added to +// CronJobNamespaceLister. +type CronJobNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 00000000..425dc6b4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/certificates/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateSigningRequestLister helps list CertificateSigningRequests. +type CertificateSigningRequestLister interface { + // List lists all CertificateSigningRequests in the indexer. + List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) + // Get retrieves the CertificateSigningRequest from the index for a given name. 
+ Get(name string) (*v1beta1.CertificateSigningRequest, error) + CertificateSigningRequestListerExpansion +} + +// certificateSigningRequestLister implements the CertificateSigningRequestLister interface. +type certificateSigningRequestLister struct { + indexer cache.Indexer +} + +// NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. +func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { + return &certificateSigningRequestLister{indexer: indexer} +} + +// List lists all CertificateSigningRequests in the indexer. +func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CertificateSigningRequest)) + }) + return ret, err +} + +// Get retrieves the CertificateSigningRequest from the index for a given name. +func (s *certificateSigningRequestLister) Get(name string) (*v1beta1.CertificateSigningRequest, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("certificatesigningrequest"), name) + } + return obj.(*v1beta1.CertificateSigningRequest), nil +} diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go new file mode 100644 index 00000000..c240be44 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// CertificateSigningRequestListerExpansion allows custom methods to be added to +// CertificateSigningRequestLister. +type CertificateSigningRequestListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go new file mode 100644 index 00000000..6ba67d0b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ComponentStatusLister helps list ComponentStatuses. +type ComponentStatusLister interface { + // List lists all ComponentStatuses in the indexer. + List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) + // Get retrieves the ComponentStatus from the index for a given name. + Get(name string) (*v1.ComponentStatus, error) + ComponentStatusListerExpansion +} + +// componentStatusLister implements the ComponentStatusLister interface. +type componentStatusLister struct { + indexer cache.Indexer +} + +// NewComponentStatusLister returns a new ComponentStatusLister. +func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { + return &componentStatusLister{indexer: indexer} +} + +// List lists all ComponentStatuses in the indexer. +func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ComponentStatus)) + }) + return ret, err +} + +// Get retrieves the ComponentStatus from the index for a given name. +func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("componentstatus"), name) + } + return obj.(*v1.ComponentStatus), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go new file mode 100644 index 00000000..e976928d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ConfigMapLister helps list ConfigMaps. +type ConfigMapLister interface { + // List lists all ConfigMaps in the indexer. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // ConfigMaps returns an object that can list and get ConfigMaps. + ConfigMaps(namespace string) ConfigMapNamespaceLister + ConfigMapListerExpansion +} + +// configMapLister implements the ConfigMapLister interface. +type configMapLister struct { + indexer cache.Indexer +} + +// NewConfigMapLister returns a new ConfigMapLister. +func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { + return &configMapLister{indexer: indexer} +} + +// List lists all ConfigMaps in the indexer. 
+func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// ConfigMaps returns an object that can list and get ConfigMaps. +func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { + return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigMapNamespaceLister helps list and get ConfigMaps. +type ConfigMapNamespaceLister interface { + // List lists all ConfigMaps in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // Get retrieves the ConfigMap from the indexer for a given namespace and name. + Get(name string) (*v1.ConfigMap, error) + ConfigMapNamespaceListerExpansion +} + +// configMapNamespaceLister implements the ConfigMapNamespaceLister +// interface. +type configMapNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ConfigMaps in the indexer for a given namespace. +func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// Get retrieves the ConfigMap from the indexer for a given namespace and name. +func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("configmap"), name) + } + return obj.(*v1.ConfigMap), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go new file mode 100644 index 00000000..6f5a1133 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointsLister helps list Endpoints. +type EndpointsLister interface { + // List lists all Endpoints in the indexer. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Endpoints returns an object that can list and get Endpoints. + Endpoints(namespace string) EndpointsNamespaceLister + EndpointsListerExpansion +} + +// endpointsLister implements the EndpointsLister interface. +type endpointsLister struct { + indexer cache.Indexer +} + +// NewEndpointsLister returns a new EndpointsLister. +func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { + return &endpointsLister{indexer: indexer} +} + +// List lists all Endpoints in the indexer. 
+func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Endpoints returns an object that can list and get Endpoints. +func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { + return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointsNamespaceLister helps list and get Endpoints. +type EndpointsNamespaceLister interface { + // List lists all Endpoints in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Get retrieves the Endpoints from the indexer for a given namespace and name. + Get(name string) (*v1.Endpoints, error) + EndpointsNamespaceListerExpansion +} + +// endpointsNamespaceLister implements the EndpointsNamespaceLister +// interface. +type endpointsNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Endpoints in the indexer for a given namespace. +func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Get retrieves the Endpoints from the indexer for a given namespace and name. +func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("endpoints"), name) + } + return obj.(*v1.Endpoints), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go new file mode 100644 index 00000000..b087cd8b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +type EventLister interface { + // List lists all Events in the indexer. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. 
+func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + Get(name string) (*v1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. +type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("event"), name) + } + return obj.(*v1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go new file mode 100644 index 00000000..a96db8dc --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ComponentStatusListerExpansion allows custom methods to be added to +// ComponentStatusLister. +type ComponentStatusListerExpansion interface{} + +// ConfigMapListerExpansion allows custom methods to be added to +// ConfigMapLister. +type ConfigMapListerExpansion interface{} + +// ConfigMapNamespaceListerExpansion allows custom methods to be added to +// ConfigMapNamespaceLister. +type ConfigMapNamespaceListerExpansion interface{} + +// EndpointsListerExpansion allows custom methods to be added to +// EndpointsLister. +type EndpointsListerExpansion interface{} + +// EndpointsNamespaceListerExpansion allows custom methods to be added to +// EndpointsNamespaceLister. +type EndpointsNamespaceListerExpansion interface{} + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaceLister. 
+type EventNamespaceListerExpansion interface{} + +// LimitRangeListerExpansion allows custom methods to be added to +// LimitRangeLister. +type LimitRangeListerExpansion interface{} + +// LimitRangeNamespaceListerExpansion allows custom methods to be added to +// LimitRangeNamespaceLister. +type LimitRangeNamespaceListerExpansion interface{} + +// NamespaceListerExpansion allows custom methods to be added to +// NamespaceLister. +type NamespaceListerExpansion interface{} + +// PersistentVolumeListerExpansion allows custom methods to be added to +// PersistentVolumeLister. +type PersistentVolumeListerExpansion interface{} + +// PersistentVolumeClaimListerExpansion allows custom methods to be added to +// PersistentVolumeClaimLister. +type PersistentVolumeClaimListerExpansion interface{} + +// PersistentVolumeClaimNamespaceListerExpansion allows custom methods to be added to +// PersistentVolumeClaimNamespaceLister. +type PersistentVolumeClaimNamespaceListerExpansion interface{} + +// PodListerExpansion allows custom methods to be added to +// PodLister. +type PodListerExpansion interface{} + +// PodNamespaceListerExpansion allows custom methods to be added to +// PodNamespaceLister. +type PodNamespaceListerExpansion interface{} + +// PodTemplateListerExpansion allows custom methods to be added to +// PodTemplateLister. +type PodTemplateListerExpansion interface{} + +// PodTemplateNamespaceListerExpansion allows custom methods to be added to +// PodTemplateNamespaceLister. +type PodTemplateNamespaceListerExpansion interface{} + +// ResourceQuotaListerExpansion allows custom methods to be added to +// ResourceQuotaLister. +type ResourceQuotaListerExpansion interface{} + +// ResourceQuotaNamespaceListerExpansion allows custom methods to be added to +// ResourceQuotaNamespaceLister. +type ResourceQuotaNamespaceListerExpansion interface{} + +// SecretListerExpansion allows custom methods to be added to +// SecretLister. +type SecretListerExpansion interface{} + +// SecretNamespaceListerExpansion allows custom methods to be added to +// SecretNamespaceLister. +type SecretNamespaceListerExpansion interface{} + +// ServiceAccountListerExpansion allows custom methods to be added to +// ServiceAccountLister. +type ServiceAccountListerExpansion interface{} + +// ServiceAccountNamespaceListerExpansion allows custom methods to be added to +// ServiceAccountNamespaceLister. +type ServiceAccountNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go new file mode 100644 index 00000000..f1994375 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// LimitRangeLister helps list LimitRanges. +type LimitRangeLister interface { + // List lists all LimitRanges in the indexer. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // LimitRanges returns an object that can list and get LimitRanges. + LimitRanges(namespace string) LimitRangeNamespaceLister + LimitRangeListerExpansion +} + +// limitRangeLister implements the LimitRangeLister interface. +type limitRangeLister struct { + indexer cache.Indexer +} + +// NewLimitRangeLister returns a new LimitRangeLister. +func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { + return &limitRangeLister{indexer: indexer} +} + +// List lists all LimitRanges in the indexer. +func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// LimitRanges returns an object that can list and get LimitRanges. +func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { + return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// LimitRangeNamespaceLister helps list and get LimitRanges. +type LimitRangeNamespaceLister interface { + // List lists all LimitRanges in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // Get retrieves the LimitRange from the indexer for a given namespace and name. + Get(name string) (*v1.LimitRange, error) + LimitRangeNamespaceListerExpansion +} + +// limitRangeNamespaceLister implements the LimitRangeNamespaceLister +// interface. +type limitRangeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all LimitRanges in the indexer for a given namespace. +func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// Get retrieves the LimitRange from the indexer for a given namespace and name. +func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("limitrange"), name) + } + return obj.(*v1.LimitRange), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go new file mode 100644 index 00000000..21be6878 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NamespaceLister helps list Namespaces. +type NamespaceLister interface { + // List lists all Namespaces in the indexer. + List(selector labels.Selector) (ret []*v1.Namespace, err error) + // Get retrieves the Namespace from the index for a given name. + Get(name string) (*v1.Namespace, error) + NamespaceListerExpansion +} + +// namespaceLister implements the NamespaceLister interface. +type namespaceLister struct { + indexer cache.Indexer +} + +// NewNamespaceLister returns a new NamespaceLister. +func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { + return &namespaceLister{indexer: indexer} +} + +// List lists all Namespaces in the indexer. +func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Namespace)) + }) + return ret, err +} + +// Get retrieves the Namespace from the index for a given name. +func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("namespace"), name) + } + return obj.(*v1.Namespace), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go new file mode 100644 index 00000000..d43a682c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NodeLister helps list Nodes. +type NodeLister interface { + // List lists all Nodes in the indexer. + List(selector labels.Selector) (ret []*v1.Node, err error) + // Get retrieves the Node from the index for a given name. + Get(name string) (*v1.Node, error) + NodeListerExpansion +} + +// nodeLister implements the NodeLister interface. +type nodeLister struct { + indexer cache.Indexer +} + +// NewNodeLister returns a new NodeLister. +func NewNodeLister(indexer cache.Indexer) NodeLister { + return &nodeLister{indexer: indexer} +} + +// List lists all Nodes in the indexer. +func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Node)) + }) + return ret, err +} + +// Get retrieves the Node from the index for a given name. 
+func (s *nodeLister) Get(name string) (*v1.Node, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("node"), name) + } + return obj.(*v1.Node), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go new file mode 100644 index 00000000..9e5c55ab --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// NodeConditionPredicate is a function that indicates whether the given node's conditions meet +// some set of criteria defined by the function. +type NodeConditionPredicate func(node *v1.Node) bool + +// NodeListerExpansion allows custom methods to be added to +// NodeLister. +type NodeListerExpansion interface { + ListWithPredicate(predicate NodeConditionPredicate) ([]*v1.Node, error) +} + +func (l *nodeLister) ListWithPredicate(predicate NodeConditionPredicate) ([]*v1.Node, error) { + nodes, err := l.List(labels.Everything()) + if err != nil { + return nil, err + } + + var filtered []*v1.Node + for i := range nodes { + if predicate(nodes[i]) { + filtered = append(filtered, nodes[i]) + } + } + + return filtered, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go new file mode 100644 index 00000000..593ba14e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeLister helps list PersistentVolumes. +type PersistentVolumeLister interface { + // List lists all PersistentVolumes in the indexer. + List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) + // Get retrieves the PersistentVolume from the index for a given name. + Get(name string) (*v1.PersistentVolume, error) + PersistentVolumeListerExpansion +} + +// persistentVolumeLister implements the PersistentVolumeLister interface. 
+type persistentVolumeLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeLister returns a new PersistentVolumeLister. +func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { + return &persistentVolumeLister{indexer: indexer} +} + +// List lists all PersistentVolumes in the indexer. +func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolume)) + }) + return ret, err +} + +// Get retrieves the PersistentVolume from the index for a given name. +func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("persistentvolume"), name) + } + return obj.(*v1.PersistentVolume), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go new file mode 100644 index 00000000..72ddac93 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeClaimLister helps list PersistentVolumeClaims. +type PersistentVolumeClaimLister interface { + // List lists all PersistentVolumeClaims in the indexer. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. + PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister + PersistentVolumeClaimListerExpansion +} + +// persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. +type persistentVolumeClaimLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. +func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { + return &persistentVolumeClaimLister{indexer: indexer} +} + +// List lists all PersistentVolumeClaims in the indexer. +func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. 
+func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { + return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. +type PersistentVolumeClaimNamespaceLister interface { + // List lists all PersistentVolumeClaims in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. + Get(name string) (*v1.PersistentVolumeClaim, error) + PersistentVolumeClaimNamespaceListerExpansion +} + +// persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister +// interface. +type persistentVolumeClaimNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PersistentVolumeClaims in the indexer for a given namespace. +func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. +func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("persistentvolumeclaim"), name) + } + return obj.(*v1.PersistentVolumeClaim), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go new file mode 100644 index 00000000..6cf4a842 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodLister helps list Pods. +type PodLister interface { + // List lists all Pods in the indexer. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Pods returns an object that can list and get Pods. + Pods(namespace string) PodNamespaceLister + PodListerExpansion +} + +// podLister implements the PodLister interface. +type podLister struct { + indexer cache.Indexer +} + +// NewPodLister returns a new PodLister. +func NewPodLister(indexer cache.Indexer) PodLister { + return &podLister{indexer: indexer} +} + +// List lists all Pods in the indexer. 
+func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Pods returns an object that can list and get Pods. +func (s *podLister) Pods(namespace string) PodNamespaceLister { + return podNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodNamespaceLister helps list and get Pods. +type PodNamespaceLister interface { + // List lists all Pods in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Get retrieves the Pod from the indexer for a given namespace and name. + Get(name string) (*v1.Pod, error) + PodNamespaceListerExpansion +} + +// podNamespaceLister implements the PodNamespaceLister +// interface. +type podNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Pods in the indexer for a given namespace. +func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Get retrieves the Pod from the indexer for a given namespace and name. +func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("pod"), name) + } + return obj.(*v1.Pod), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go new file mode 100644 index 00000000..d825c747 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodTemplateLister helps list PodTemplates. +type PodTemplateLister interface { + // List lists all PodTemplates in the indexer. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // PodTemplates returns an object that can list and get PodTemplates. + PodTemplates(namespace string) PodTemplateNamespaceLister + PodTemplateListerExpansion +} + +// podTemplateLister implements the PodTemplateLister interface. +type podTemplateLister struct { + indexer cache.Indexer +} + +// NewPodTemplateLister returns a new PodTemplateLister. +func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { + return &podTemplateLister{indexer: indexer} +} + +// List lists all PodTemplates in the indexer. 
+func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// PodTemplates returns an object that can list and get PodTemplates. +func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { + return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodTemplateNamespaceLister helps list and get PodTemplates. +type PodTemplateNamespaceLister interface { + // List lists all PodTemplates in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // Get retrieves the PodTemplate from the indexer for a given namespace and name. + Get(name string) (*v1.PodTemplate, error) + PodTemplateNamespaceListerExpansion +} + +// podTemplateNamespaceLister implements the PodTemplateNamespaceLister +// interface. +type podTemplateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodTemplates in the indexer for a given namespace. +func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// Get retrieves the PodTemplate from the indexer for a given namespace and name. +func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podtemplate"), name) + } + return obj.(*v1.PodTemplate), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go new file mode 100644 index 00000000..6670a9d9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicationControllerLister helps list ReplicationControllers. +type ReplicationControllerLister interface { + // List lists all ReplicationControllers in the indexer. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // ReplicationControllers returns an object that can list and get ReplicationControllers. + ReplicationControllers(namespace string) ReplicationControllerNamespaceLister + ReplicationControllerListerExpansion +} + +// replicationControllerLister implements the ReplicationControllerLister interface. 
+type replicationControllerLister struct { + indexer cache.Indexer +} + +// NewReplicationControllerLister returns a new ReplicationControllerLister. +func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { + return &replicationControllerLister{indexer: indexer} +} + +// List lists all ReplicationControllers in the indexer. +func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// ReplicationControllers returns an object that can list and get ReplicationControllers. +func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { + return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicationControllerNamespaceLister helps list and get ReplicationControllers. +type ReplicationControllerNamespaceLister interface { + // List lists all ReplicationControllers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // Get retrieves the ReplicationController from the indexer for a given namespace and name. + Get(name string) (*v1.ReplicationController, error) + ReplicationControllerNamespaceListerExpansion +} + +// replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister +// interface. +type replicationControllerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicationControllers in the indexer for a given namespace. +func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// Get retrieves the ReplicationController from the indexer for a given namespace and name. +func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicationcontroller"), name) + } + return obj.(*v1.ReplicationController), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go new file mode 100644 index 00000000..b031d521 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicationControllerListerExpansion allows custom methods to be added to +// ReplicationControllerLister. +type ReplicationControllerListerExpansion interface { + GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) +} + +// ReplicationControllerNamespaceListerExpansion allows custom methods to be added to +// ReplicationControllerNamespaceLister. +type ReplicationControllerNamespaceListerExpansion interface{} + +// GetPodControllers returns a list of ReplicationControllers that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicationControllers are found. +func (s *replicationControllerLister) GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) + } + + items, err := s.ReplicationControllers(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var controllers []*v1.ReplicationController + for i := range items { + rc := items[i] + selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() + + // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + controllers = append(controllers, rc) + } + + if len(controllers) == 0 { + return nil, fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return controllers, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go new file mode 100644 index 00000000..713a4151 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceQuotaLister helps list ResourceQuotas. +type ResourceQuotaLister interface { + // List lists all ResourceQuotas in the indexer. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // ResourceQuotas returns an object that can list and get ResourceQuotas. + ResourceQuotas(namespace string) ResourceQuotaNamespaceLister + ResourceQuotaListerExpansion +} + +// resourceQuotaLister implements the ResourceQuotaLister interface. +type resourceQuotaLister struct { + indexer cache.Indexer +} + +// NewResourceQuotaLister returns a new ResourceQuotaLister. 
+func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { + return &resourceQuotaLister{indexer: indexer} +} + +// List lists all ResourceQuotas in the indexer. +func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// ResourceQuotas returns an object that can list and get ResourceQuotas. +func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { + return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceQuotaNamespaceLister helps list and get ResourceQuotas. +type ResourceQuotaNamespaceLister interface { + // List lists all ResourceQuotas in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // Get retrieves the ResourceQuota from the indexer for a given namespace and name. + Get(name string) (*v1.ResourceQuota, error) + ResourceQuotaNamespaceListerExpansion +} + +// resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister +// interface. +type resourceQuotaNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceQuotas in the indexer for a given namespace. +func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// Get retrieves the ResourceQuota from the indexer for a given namespace and name. +func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("resourcequota"), name) + } + return obj.(*v1.ResourceQuota), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go new file mode 100644 index 00000000..26ef13d9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// SecretLister helps list Secrets. +type SecretLister interface { + // List lists all Secrets in the indexer. + List(selector labels.Selector) (ret []*v1.Secret, err error) + // Secrets returns an object that can list and get Secrets. + Secrets(namespace string) SecretNamespaceLister + SecretListerExpansion +} + +// secretLister implements the SecretLister interface. 
+type secretLister struct { + indexer cache.Indexer +} + +// NewSecretLister returns a new SecretLister. +func NewSecretLister(indexer cache.Indexer) SecretLister { + return &secretLister{indexer: indexer} +} + +// List lists all Secrets in the indexer. +func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Secrets returns an object that can list and get Secrets. +func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { + return secretNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// SecretNamespaceLister helps list and get Secrets. +type SecretNamespaceLister interface { + // List lists all Secrets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Secret, err error) + // Get retrieves the Secret from the indexer for a given namespace and name. + Get(name string) (*v1.Secret, error) + SecretNamespaceListerExpansion +} + +// secretNamespaceLister implements the SecretNamespaceLister +// interface. +type secretNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Secrets in the indexer for a given namespace. +func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Get retrieves the Secret from the indexer for a given namespace and name. +func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("secret"), name) + } + return obj.(*v1.Secret), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go new file mode 100644 index 00000000..895a6922 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceLister helps list Services. +type ServiceLister interface { + // List lists all Services in the indexer. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. 
+func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + Get(name string) (*v1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. +func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("service"), name) + } + return obj.(*v1.Service), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go new file mode 100644 index 00000000..e283d250 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface { + GetPodServices(pod *v1.Pod) ([]*v1.Service, error) +} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} + +// TODO: Move this back to scheduler as a helper function that takes a Store, +// rather than a method of ServiceLister. 
+func (s *serviceLister) GetPodServices(pod *v1.Pod) ([]*v1.Service, error) { + allServices, err := s.Services(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var services []*v1.Service + for i := range allServices { + service := allServices[i] + if service.Spec.Selector == nil { + // services with nil selectors match nothing, not everything. + continue + } + selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() + if selector.Matches(labels.Set(pod.Labels)) { + services = append(services, service) + } + } + + return services, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go new file mode 100644 index 00000000..2245d5d4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceAccountLister helps list ServiceAccounts. +type ServiceAccountLister interface { + // List lists all ServiceAccounts in the indexer. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // ServiceAccounts returns an object that can list and get ServiceAccounts. + ServiceAccounts(namespace string) ServiceAccountNamespaceLister + ServiceAccountListerExpansion +} + +// serviceAccountLister implements the ServiceAccountLister interface. +type serviceAccountLister struct { + indexer cache.Indexer +} + +// NewServiceAccountLister returns a new ServiceAccountLister. +func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { + return &serviceAccountLister{indexer: indexer} +} + +// List lists all ServiceAccounts in the indexer. +func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// ServiceAccounts returns an object that can list and get ServiceAccounts. +func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { + return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceAccountNamespaceLister helps list and get ServiceAccounts. +type ServiceAccountNamespaceLister interface { + // List lists all ServiceAccounts in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // Get retrieves the ServiceAccount from the indexer for a given namespace and name. + Get(name string) (*v1.ServiceAccount, error) + ServiceAccountNamespaceListerExpansion +} + +// serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister +// interface. 
+type serviceAccountNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServiceAccounts in the indexer for a given namespace. +func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// Get retrieves the ServiceAccount from the indexer for a given namespace and name. +func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("serviceaccount"), name) + } + return obj.(*v1.ServiceAccount), nil +} diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go new file mode 100644 index 00000000..bca3c452 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +type EventLister interface { + // List lists all Events in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. +func (s *eventLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. 
+type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1beta1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("event"), name) + } + return obj.(*v1beta1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go new file mode 100644 index 00000000..7e8fb62b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaceLister. +type EventNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go new file mode 100644 index 00000000..4672a5cb --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. 
+type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. + Get(name string) (*v1beta1.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. +func (s daemonSetNamespaceLister) Get(name string) (*v1beta1.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("daemonset"), name) + } + return obj.(*v1beta1.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go new file mode 100644 index 00000000..336a4ed8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. 
+type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*v1beta1.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*v1beta1.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*v1beta1.DaemonSet, error) { + var selector labels.Selector + var daemonSet *v1beta1.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*v1beta1.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*v1beta1.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*v1beta1.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go new file mode 100644 index 00000000..4c17085d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. +func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) + } + return obj.(*v1beta1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 00000000..b9a14167 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) ([]*extensions.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. +func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) ([]*extensions.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*extensions.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go new file mode 100644 index 00000000..060c7a35 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// IngressListerExpansion allows custom methods to be added to +// IngressLister. 
+type IngressListerExpansion interface{} + +// IngressNamespaceListerExpansion allows custom methods to be added to +// IngressNamespaceLister. +type IngressNamespaceListerExpansion interface{} + +// PodSecurityPolicyListerExpansion allows custom methods to be added to +// PodSecurityPolicyLister. +type PodSecurityPolicyListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go new file mode 100644 index 00000000..5615dfcc --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IngressLister helps list Ingresses. +type IngressLister interface { + // List lists all Ingresses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + // Ingresses returns an object that can list and get Ingresses. + Ingresses(namespace string) IngressNamespaceLister + IngressListerExpansion +} + +// ingressLister implements the IngressLister interface. +type ingressLister struct { + indexer cache.Indexer +} + +// NewIngressLister returns a new IngressLister. +func NewIngressLister(indexer cache.Indexer) IngressLister { + return &ingressLister{indexer: indexer} +} + +// List lists all Ingresses in the indexer. +func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Ingress)) + }) + return ret, err +} + +// Ingresses returns an object that can list and get Ingresses. +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { + return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IngressNamespaceLister helps list and get Ingresses. +type IngressNamespaceLister interface { + // List lists all Ingresses in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + // Get retrieves the Ingress from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Ingress, error) + IngressNamespaceListerExpansion +} + +// ingressNamespaceLister implements the IngressNamespaceLister +// interface. +type ingressNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Ingresses in the indexer for a given namespace. 
+func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Ingress)) + }) + return ret, err +} + +// Get retrieves the Ingress from the indexer for a given namespace and name. +func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) + } + return obj.(*v1beta1.Ingress), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000..3189ff7c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodSecurityPolicyLister helps list PodSecurityPolicies. +type PodSecurityPolicyLister interface { + // List lists all PodSecurityPolicies in the indexer. + List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) + // Get retrieves the PodSecurityPolicy from the index for a given name. + Get(name string) (*v1beta1.PodSecurityPolicy, error) + PodSecurityPolicyListerExpansion +} + +// podSecurityPolicyLister implements the PodSecurityPolicyLister interface. +type podSecurityPolicyLister struct { + indexer cache.Indexer +} + +// NewPodSecurityPolicyLister returns a new PodSecurityPolicyLister. +func NewPodSecurityPolicyLister(indexer cache.Indexer) PodSecurityPolicyLister { + return &podSecurityPolicyLister{indexer: indexer} +} + +// List lists all PodSecurityPolicies in the indexer. +func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodSecurityPolicy)) + }) + return ret, err +} + +// Get retrieves the PodSecurityPolicy from the index for a given name. 
+func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("podsecuritypolicy"), name) + } + return obj.(*v1beta1.PodSecurityPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go new file mode 100644 index 00000000..44de996e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. +type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1beta1.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. 
+func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1beta1.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("replicaset"), name) + } + return obj.(*v1beta1.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go new file mode 100644 index 00000000..1f72644c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*extensions.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*extensions.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*extensions.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go new file mode 100644 index 00000000..a027af82 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. 
+func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) + } + return obj.(*v1beta1.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go new file mode 100644 index 00000000..91fe5e77 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// NetworkPolicyListerExpansion allows custom methods to be added to +// NetworkPolicyLister. +type NetworkPolicyListerExpansion interface{} + +// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to +// NetworkPolicyNamespaceLister. +type NetworkPolicyNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go new file mode 100644 index 00000000..59e17eec --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NetworkPolicyLister helps list NetworkPolicies. +type NetworkPolicyLister interface { + // List lists all NetworkPolicies in the indexer. + List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + // NetworkPolicies returns an object that can list and get NetworkPolicies. + NetworkPolicies(namespace string) NetworkPolicyNamespaceLister + NetworkPolicyListerExpansion +} + +// networkPolicyLister implements the NetworkPolicyLister interface. +type networkPolicyLister struct { + indexer cache.Indexer +} + +// NewNetworkPolicyLister returns a new NetworkPolicyLister. +func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { + return &networkPolicyLister{indexer: indexer} +} + +// List lists all NetworkPolicies in the indexer. 
+func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.NetworkPolicy)) + }) + return ret, err +} + +// NetworkPolicies returns an object that can list and get NetworkPolicies. +func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { + return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// NetworkPolicyNamespaceLister helps list and get NetworkPolicies. +type NetworkPolicyNamespaceLister interface { + // List lists all NetworkPolicies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. + Get(name string) (*v1.NetworkPolicy, error) + NetworkPolicyNamespaceListerExpansion +} + +// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister +// interface. +type networkPolicyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all NetworkPolicies in the indexer for a given namespace. +func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.NetworkPolicy)) + }) + return ret, err +} + +// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. +func (s networkPolicyNamespaceLister) Get(name string) (*v1.NetworkPolicy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("networkpolicy"), name) + } + return obj.(*v1.NetworkPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go new file mode 100644 index 00000000..742775f6 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EvictionLister helps list Evictions. +type EvictionLister interface { + // List lists all Evictions in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + // Evictions returns an object that can list and get Evictions. + Evictions(namespace string) EvictionNamespaceLister + EvictionListerExpansion +} + +// evictionLister implements the EvictionLister interface. +type evictionLister struct { + indexer cache.Indexer +} + +// NewEvictionLister returns a new EvictionLister. 
+func NewEvictionLister(indexer cache.Indexer) EvictionLister { + return &evictionLister{indexer: indexer} +} + +// List lists all Evictions in the indexer. +func (s *evictionLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Eviction)) + }) + return ret, err +} + +// Evictions returns an object that can list and get Evictions. +func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { + return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EvictionNamespaceLister helps list and get Evictions. +type EvictionNamespaceLister interface { + // List lists all Evictions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + // Get retrieves the Eviction from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Eviction, error) + EvictionNamespaceListerExpansion +} + +// evictionNamespaceLister implements the EvictionNamespaceLister +// interface. +type evictionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Evictions in the indexer for a given namespace. +func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Eviction)) + }) + return ret, err +} + +// Get retrieves the Eviction from the indexer for a given namespace and name. +func (s evictionNamespaceLister) Get(name string) (*v1beta1.Eviction, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("eviction"), name) + } + return obj.(*v1beta1.Eviction), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go new file mode 100644 index 00000000..4785fbc0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// EvictionListerExpansion allows custom methods to be added to +// EvictionLister. +type EvictionListerExpansion interface{} + +// EvictionNamespaceListerExpansion allows custom methods to be added to +// EvictionNamespaceLister. +type EvictionNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 00000000..6512f29f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodDisruptionBudgetLister helps list PodDisruptionBudgets. +type PodDisruptionBudgetLister interface { + // List lists all PodDisruptionBudgets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. + PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister + PodDisruptionBudgetListerExpansion +} + +// podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. +type podDisruptionBudgetLister struct { + indexer cache.Indexer +} + +// NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. +func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { + return &podDisruptionBudgetLister{indexer: indexer} +} + +// List lists all PodDisruptionBudgets in the indexer. +func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) + }) + return ret, err +} + +// PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. +func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { + return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. +type PodDisruptionBudgetNamespaceLister interface { + // List lists all PodDisruptionBudgets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + // Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. + Get(name string) (*v1beta1.PodDisruptionBudget, error) + PodDisruptionBudgetNamespaceListerExpansion +} + +// podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister +// interface. +type podDisruptionBudgetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodDisruptionBudgets in the indexer for a given namespace. +func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) + }) + return ret, err +} + +// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. 
+func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1beta1.PodDisruptionBudget, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("poddisruptionbudget"), name) + } + return obj.(*v1beta1.PodDisruptionBudget), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go new file mode 100644 index 00000000..c0ab9d3e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -0,0 +1,74 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// PodDisruptionBudgetListerExpansion allows custom methods to be added to +// PodDisruptionBudgetLister. +type PodDisruptionBudgetListerExpansion interface { + GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) +} + +// PodDisruptionBudgetNamespaceListerExpansion allows custom methods to be added to +// PodDisruptionBudgetNamespaceLister. +type PodDisruptionBudgetNamespaceListerExpansion interface{} + +// GetPodPodDisruptionBudgets returns a list of PodDisruptionBudgets matching a pod. Returns an error only if no matching PodDisruptionBudgets are found. +func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) { + var selector labels.Selector + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no PodDisruptionBudgets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.PodDisruptionBudgets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var pdbList []*policy.PodDisruptionBudget + for i := range list { + pdb := list[i] + selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) + if err != nil { + glog.Warningf("invalid selector: %v", err) + // TODO(mml): add an event to the PDB + continue + } + + // If a PDB with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + pdbList = append(pdbList, pdb) + } + + if len(pdbList) == 0 { + return nil, fmt.Errorf("could not find PodDisruptionBudget for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return pdbList, nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go new file mode 100644 index 00000000..5dc9a225 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleLister helps list ClusterRoles. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + List(selector labels.Selector) (ret []*v1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + Get(name string) (*v1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + indexer cache.Indexer +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{indexer: indexer} +} + +// List lists all ClusterRoles in the indexer. +func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1.ClusterRole, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterRole)) + }) + return ret, err +} + +// Get retrieves the ClusterRole from the index for a given name. +func (s *clusterRoleLister) Get(name string) (*v1.ClusterRole, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterrole"), name) + } + return obj.(*v1.ClusterRole), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go new file mode 100644 index 00000000..bb3186a0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. 
+ Get(name string) (*v1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + indexer cache.Indexer +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{indexer: indexer} +} + +// List lists all ClusterRoleBindings in the indexer. +func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterRoleBinding)) + }) + return ret, err +} + +// Get retrieves the ClusterRoleBinding from the index for a given name. +func (s *clusterRoleBindingLister) Get(name string) (*v1.ClusterRoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterrolebinding"), name) + } + return obj.(*v1.ClusterRoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go new file mode 100644 index 00000000..4d9872d3 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. +type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1/role.go new file mode 100644 index 00000000..8d7625db --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/role.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +type RoleLister interface { + // List lists all Roles in the indexer. + List(selector labels.Selector) (ret []*v1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + indexer cache.Indexer +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{indexer: indexer} +} + +// List lists all Roles in the indexer. +func (s *roleLister) List(selector labels.Selector) (ret []*v1.Role, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Role)) + }) + return ret, err +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleNamespaceLister helps list and get Roles. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + Get(name string) (*v1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. +type roleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Roles in the indexer for a given namespace. +func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1.Role, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Role)) + }) + return ret, err +} + +// Get retrieves the Role from the indexer for a given namespace and name. +func (s roleNamespaceLister) Get(name string) (*v1.Role, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("role"), name) + } + return obj.(*v1.Role), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go new file mode 100644 index 00000000..b8209d85 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + indexer cache.Indexer +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{indexer: indexer} +} + +// List lists all RoleBindings in the indexer. +func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.RoleBinding)) + }) + return ret, err +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + Get(name string) (*v1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RoleBindings in the indexer for a given namespace. +func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.RoleBinding)) + }) + return ret, err +} + +// Get retrieves the RoleBinding from the indexer for a given namespace and name. +func (s roleBindingNamespaceLister) Get(name string) (*v1.RoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("rolebinding"), name) + } + return obj.(*v1.RoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go new file mode 100644 index 00000000..9e20a6d1 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleLister helps list ClusterRoles. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + Get(name string) (*v1alpha1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + indexer cache.Indexer +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{indexer: indexer} +} + +// List lists all ClusterRoles in the indexer. +func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterRole)) + }) + return ret, err +} + +// Get retrieves the ClusterRole from the index for a given name. +func (s *clusterRoleLister) Get(name string) (*v1alpha1.ClusterRole, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clusterrole"), name) + } + return obj.(*v1alpha1.ClusterRole), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 00000000..155666ab --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. 
+ Get(name string) (*v1alpha1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + indexer cache.Indexer +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{indexer: indexer} +} + +// List lists all ClusterRoleBindings in the indexer. +func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterRoleBinding)) + }) + return ret, err +} + +// Get retrieves the ClusterRoleBinding from the index for a given name. +func (s *clusterRoleBindingLister) Get(name string) (*v1alpha1.ClusterRoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clusterrolebinding"), name) + } + return obj.(*v1alpha1.ClusterRoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..0ab4fb99 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. +type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go new file mode 100644 index 00000000..72ab79c9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +type RoleLister interface { + // List lists all Roles in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + indexer cache.Indexer +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{indexer: indexer} +} + +// List lists all Roles in the indexer. +func (s *roleLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Role)) + }) + return ret, err +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleNamespaceLister helps list and get Roles. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. +type roleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Roles in the indexer for a given namespace. +func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Role)) + }) + return ret, err +} + +// Get retrieves the Role from the indexer for a given namespace and name. +func (s roleNamespaceLister) Get(name string) (*v1alpha1.Role, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("role"), name) + } + return obj.(*v1alpha1.Role), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go new file mode 100644 index 00000000..7f9cfd45 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + indexer cache.Indexer +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{indexer: indexer} +} + +// List lists all RoleBindings in the indexer. +func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RoleBinding)) + }) + return ret, err +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RoleBindings in the indexer for a given namespace. +func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RoleBinding)) + }) + return ret, err +} + +// Get retrieves the RoleBinding from the indexer for a given namespace and name. +func (s roleBindingNamespaceLister) Get(name string) (*v1alpha1.RoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("rolebinding"), name) + } + return obj.(*v1alpha1.RoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go new file mode 100644 index 00000000..65ec3eb9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleLister helps list ClusterRoles. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + Get(name string) (*v1beta1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + indexer cache.Indexer +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{indexer: indexer} +} + +// List lists all ClusterRoles in the indexer. +func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ClusterRole)) + }) + return ret, err +} + +// Get retrieves the ClusterRole from the index for a given name. +func (s *clusterRoleLister) Get(name string) (*v1beta1.ClusterRole, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("clusterrole"), name) + } + return obj.(*v1beta1.ClusterRole), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 00000000..146f2d7f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. 
+ Get(name string) (*v1beta1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + indexer cache.Indexer +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{indexer: indexer} +} + +// List lists all ClusterRoleBindings in the indexer. +func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ClusterRoleBinding)) + }) + return ret, err +} + +// Get retrieves the ClusterRoleBinding from the index for a given name. +func (s *clusterRoleBindingLister) Get(name string) (*v1beta1.ClusterRoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("clusterrolebinding"), name) + } + return obj.(*v1beta1.ClusterRoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go new file mode 100644 index 00000000..b6eeae83 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. +type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go new file mode 100644 index 00000000..b795e98b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +type RoleLister interface { + // List lists all Roles in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + indexer cache.Indexer +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{indexer: indexer} +} + +// List lists all Roles in the indexer. +func (s *roleLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Role)) + }) + return ret, err +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleNamespaceLister helps list and get Roles. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. +type roleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Roles in the indexer for a given namespace. +func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Role)) + }) + return ret, err +} + +// Get retrieves the Role from the indexer for a given namespace and name. +func (s roleNamespaceLister) Get(name string) (*v1beta1.Role, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("role"), name) + } + return obj.(*v1beta1.Role), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go new file mode 100644 index 00000000..d27ea2eb --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + indexer cache.Indexer +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{indexer: indexer} +} + +// List lists all RoleBindings in the indexer. +func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.RoleBinding)) + }) + return ret, err +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + Get(name string) (*v1beta1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RoleBindings in the indexer for a given namespace. +func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.RoleBinding)) + }) + return ret, err +} + +// Get retrieves the RoleBinding from the indexer for a given namespace and name. 
+func (s roleBindingNamespaceLister) Get(name string) (*v1beta1.RoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("rolebinding"), name) + } + return obj.(*v1beta1.RoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..8a644c80 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// PriorityClassListerExpansion allows custom methods to be added to +// PriorityClassLister. +type PriorityClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go new file mode 100644 index 00000000..9ed04fd2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityClassLister helps list PriorityClasses. +type PriorityClassLister interface { + // List lists all PriorityClasses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) + // Get retrieves the PriorityClass from the index for a given name. + Get(name string) (*v1alpha1.PriorityClass, error) + PriorityClassListerExpansion +} + +// priorityClassLister implements the PriorityClassLister interface. +type priorityClassLister struct { + indexer cache.Indexer +} + +// NewPriorityClassLister returns a new PriorityClassLister. +func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { + return &priorityClassLister{indexer: indexer} +} + +// List lists all PriorityClasses in the indexer. 
+func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PriorityClass)) + }) + return ret, err +} + +// Get retrieves the PriorityClass from the index for a given name. +func (s *priorityClassLister) Get(name string) (*v1alpha1.PriorityClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("priorityclass"), name) + } + return obj.(*v1alpha1.PriorityClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..7a5ce38e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// PodPresetListerExpansion allows custom methods to be added to +// PodPresetLister. +type PodPresetListerExpansion interface{} + +// PodPresetNamespaceListerExpansion allows custom methods to be added to +// PodPresetNamespaceLister. +type PodPresetNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go new file mode 100644 index 00000000..18f62249 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/settings/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodPresetLister helps list PodPresets. +type PodPresetLister interface { + // List lists all PodPresets in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) + // PodPresets returns an object that can list and get PodPresets. + PodPresets(namespace string) PodPresetNamespaceLister + PodPresetListerExpansion +} + +// podPresetLister implements the PodPresetLister interface. +type podPresetLister struct { + indexer cache.Indexer +} + +// NewPodPresetLister returns a new PodPresetLister. 
+func NewPodPresetLister(indexer cache.Indexer) PodPresetLister { + return &podPresetLister{indexer: indexer} +} + +// List lists all PodPresets in the indexer. +func (s *podPresetLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodPreset)) + }) + return ret, err +} + +// PodPresets returns an object that can list and get PodPresets. +func (s *podPresetLister) PodPresets(namespace string) PodPresetNamespaceLister { + return podPresetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodPresetNamespaceLister helps list and get PodPresets. +type PodPresetNamespaceLister interface { + // List lists all PodPresets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) + // Get retrieves the PodPreset from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.PodPreset, error) + PodPresetNamespaceListerExpansion +} + +// podPresetNamespaceLister implements the PodPresetNamespaceLister +// interface. +type podPresetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodPresets in the indexer for a given namespace. +func (s podPresetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodPreset)) + }) + return ret, err +} + +// Get retrieves the PodPreset from the indexer for a given namespace and name. +func (s podPresetNamespaceLister) Get(name string) (*v1alpha1.PodPreset, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("podpreset"), name) + } + return obj.(*v1alpha1.PodPreset), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go new file mode 100644 index 00000000..2353b59d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// StorageClassListerExpansion allows custom methods to be added to +// StorageClassLister. +type StorageClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go new file mode 100644 index 00000000..7c37321f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StorageClassLister helps list StorageClasses. +type StorageClassLister interface { + // List lists all StorageClasses in the indexer. + List(selector labels.Selector) (ret []*v1.StorageClass, err error) + // Get retrieves the StorageClass from the index for a given name. + Get(name string) (*v1.StorageClass, error) + StorageClassListerExpansion +} + +// storageClassLister implements the StorageClassLister interface. +type storageClassLister struct { + indexer cache.Indexer +} + +// NewStorageClassLister returns a new StorageClassLister. +func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { + return &storageClassLister{indexer: indexer} +} + +// List lists all StorageClasses in the indexer. +func (s *storageClassLister) List(selector labels.Selector) (ret []*v1.StorageClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StorageClass)) + }) + return ret, err +} + +// Get retrieves the StorageClass from the index for a given name. +func (s *storageClassLister) Get(name string) (*v1.StorageClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("storageclass"), name) + } + return obj.(*v1.StorageClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..63abe94a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. +type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go new file mode 100644 index 00000000..02004629 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*v1alpha1.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. +func (s *volumeAttachmentLister) Get(name string) (*v1alpha1.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumeattachment"), name) + } + return obj.(*v1alpha1.VolumeAttachment), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go new file mode 100644 index 00000000..84e0f9c4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// StorageClassListerExpansion allows custom methods to be added to +// StorageClassLister. +type StorageClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go new file mode 100644 index 00000000..9253319b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StorageClassLister helps list StorageClasses. +type StorageClassLister interface { + // List lists all StorageClasses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) + // Get retrieves the StorageClass from the index for a given name. + Get(name string) (*v1beta1.StorageClass, error) + StorageClassListerExpansion +} + +// storageClassLister implements the StorageClassLister interface. +type storageClassLister struct { + indexer cache.Indexer +} + +// NewStorageClassLister returns a new StorageClassLister. +func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { + return &storageClassLister{indexer: indexer} +} + +// List lists all StorageClasses in the indexer. +func (s *storageClassLister) List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.StorageClass)) + }) + return ret, err +} + +// Get retrieves the StorageClass from the index for a given name. +func (s *storageClassLister) Get(name string) (*v1beta1.StorageClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("storageclass"), name) + } + return obj.(*v1beta1.StorageClass), nil +} diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index c377705f..7ab0ed3a 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -51,9 +51,13 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. 
See also https://git-scm.com/docs/gitattributes gitVersion string = "v0.0.0-master+$Format:%h$" - gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) - gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "" // state of git tree, either "clean" or "dirty" buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 97ec03e0..aac126cc 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -179,6 +179,24 @@ func (r *Request) Resource(resource string) *Request { return r } +// BackOff sets the request's backoff manager to the one specified, +// or defaults to the stub implementation if nil is provided +func (r *Request) BackOff(manager BackoffManager) *Request { + if manager == nil { + r.backoffMgr = &NoBackoff{} + return r + } + + r.backoffMgr = manager + return r +} + +// Throttle receives a rate-limiter and sets or replaces an existing request limiter +func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request { + r.throttle = limiter + return r +} + // SubResource sets a sub-resource path which can be multiple segments segment after the resource // name but before the suffix. func (r *Request) SubResource(subresources ...string) *Request { @@ -827,6 +845,8 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { + case bool(glog.V(10)): + return body case bool(glog.V(9)): max = 10240 case bool(glog.V(8)): diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index 1632e1ef..59050fc4 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package rest -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TLSClientConfig).DeepCopyInto(out.(*TLSClientConfig)) - return nil - }, InType: reflect.TypeOf(&TLSClientConfig{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = *in diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 4967f98d..a97b5f98 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -109,7 +109,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, r := &Reflector{ name: name, // we need this to be unique per process (some names are still the same)but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPromethusMetricName(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + metrics: newReflectorMetrics(makeValidPromethusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -120,9 +120,9 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } -func makeValidPromethusMetricName(in string) string { +func makeValidPromethusMetricLabel(in string) string { // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_").Replace(in) + return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -295,6 +295,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { }() for { + // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors + select { + case <-stopCh: + return nil + default: + } + timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options = metav1.ListOptions{ ResourceVersion: resourceVersion, diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 93a891e9..0a081871 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index b787f0dd..51668f05 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -21,48 +21,9 @@ limitations under the License. package api import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthInfo).DeepCopyInto(out.(*AuthInfo)) - return nil - }, InType: reflect.TypeOf(&AuthInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthProviderConfig).DeepCopyInto(out.(*AuthProviderConfig)) - return nil - }, InType: reflect.TypeOf(&AuthProviderConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Cluster).DeepCopyInto(out.(*Cluster)) - return nil - }, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Config).DeepCopyInto(out.(*Config)) - return nil - }, InType: reflect.TypeOf(&Config{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Context).DeepCopyInto(out.(*Context)) - return nil - }, InType: reflect.TypeOf(&Context{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preferences).DeepCopyInto(out.(*Preferences)) - return nil - }, InType: reflect.TypeOf(&Preferences{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { *out = *in diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index bcbe9fcd..6b69f366 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport" spdy "k8s.io/client-go/transport/spdy" ) @@ -72,8 +71,18 @@ type streamExecutor struct { // NewSPDYExecutor connects to the provided server and upgrades the connection to // multiplexed bidirectional streams. func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { return NewSPDYExecutorForProtocols( - config, method, url, + transport, upgrader, method, url, remotecommand.StreamProtocolV4Name, remotecommand.StreamProtocolV3Name, remotecommand.StreamProtocolV2Name, @@ -83,16 +92,11 @@ func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Ex // NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to // multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most -// callers should use NewSPDYExecutor. 
-func NewSPDYExecutorForProtocols(config *restclient.Config, method string, url *url.URL, protocols ...string) (Executor, error) { - wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - wrapper = transport.DebugWrappers(wrapper) +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { return &streamExecutor{ - upgrader: upgradeRoundTripper, - transport: wrapper, + upgrader: upgrader, + transport: transport, method: method, url: url, protocols: protocols, diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 561c92c1..da22cdee 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -88,5 +88,5 @@ func tlsConfigKey(c *Config) (string, error) { return "", err } // Only include the things that actually affect the tls.Config - return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil + return fmt.Sprintf("%v/%x/%x/%x/%v", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData, c.TLS.ServerName), nil } diff --git a/vendor/k8s.io/kube-openapi/README.md b/vendor/k8s.io/kube-openapi/README.md index 1b24a782..babadde1 100644 --- a/vendor/k8s.io/kube-openapi/README.md +++ b/vendor/k8s.io/kube-openapi/README.md @@ -1,14 +1,14 @@ # Kube OpenAPI -This repo is the home for Kubernetes OpenAPI discovery spec generation. The goal -is to support a subset of OpenAPI features to satisfy kubernetes use-cases but -implement that subset with little to no assumption about the structure of the -code or routes. Thus, there should be no kubernetes specific code in this repo. +This repo is the home for Kubernetes OpenAPI discovery spec generation. The goal +is to support a subset of OpenAPI features to satisfy kubernetes use-cases but +implement that subset with little to no assumption about the structure of the +code or routes. Thus, there should be no kubernetes specific code in this repo. -There are two main parts: - - A model generator that goes through .go files, find and generate model -definitions. - - The spec generator that is responsible for dynamically generate -the final OpenAPI spec using web service routes or combining other +There are two main parts: + - A model generator that goes through .go files, find and generate model +definitions. + - The spec generator that is responsible for dynamically generate +the final OpenAPI spec using web service routes or combining other OpenAPI/Json specs. diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 00000000..11ed8a6b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 00000000..5f607c76 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,275 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + yaml "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. +func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. 
+ for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and returns a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetAdditionalProperties().GetSchema() == nil { + return nil, newSchemaError(path, "invalid object doesn't have additional properties") + } + sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + switch t { + case String: + case Number: + case Integer: + case Boolean: + case "": // Some models are completely empty, and can be safely ignored. 
+ // Do nothing + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) == 1 { + t := s.GetType().GetValue()[0] + switch t { + case object: + return d.parseMap(s, path) + case array: + return d.parseArray(s, path) + } + + } + if s.GetXRef() != "" { + return d.parseReference(s, path) + } + if s.GetProperties() != nil { + return d.parseKind(s, path) + } + return d.parsePrimitive(s, path) +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. +func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 00000000..02ab06d6 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,251 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// Models interface describe a model provider. They can give you the +// schema for a specific model. +type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions. + GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each types of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its element of the same `SubType`. +type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. 
It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. + Fields map[string]Schema +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns a alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object who values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and this subtype can be visited through the `subType` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean. + Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go index 242aab77..5de5f276 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/service/util.go @@ -18,10 +18,9 @@ package service import ( "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" netsets "k8s.io/kubernetes/pkg/util/net/sets" + "strings" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go deleted file mode 100644 index e16af3ad..00000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go +++ /dev/null @@ -1,5430 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api" - unsafe "unsafe" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - Convert_v1_Affinity_To_api_Affinity, - Convert_api_Affinity_To_v1_Affinity, - Convert_v1_AttachedVolume_To_api_AttachedVolume, - Convert_api_AttachedVolume_To_v1_AttachedVolume, - Convert_v1_AvoidPods_To_api_AvoidPods, - Convert_api_AvoidPods_To_v1_AvoidPods, - Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, - Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, - Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource, - Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource, - Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - Convert_v1_Binding_To_api_Binding, - Convert_api_Binding_To_v1_Binding, - Convert_v1_Capabilities_To_api_Capabilities, - Convert_api_Capabilities_To_v1_Capabilities, - Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource, - Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource, - Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - Convert_v1_ClientIPConfig_To_api_ClientIPConfig, - Convert_api_ClientIPConfig_To_v1_ClientIPConfig, - Convert_v1_ComponentCondition_To_api_ComponentCondition, - Convert_api_ComponentCondition_To_v1_ComponentCondition, - Convert_v1_ComponentStatus_To_api_ComponentStatus, - Convert_api_ComponentStatus_To_v1_ComponentStatus, - Convert_v1_ComponentStatusList_To_api_ComponentStatusList, - Convert_api_ComponentStatusList_To_v1_ComponentStatusList, - Convert_v1_ConfigMap_To_api_ConfigMap, - Convert_api_ConfigMap_To_v1_ConfigMap, - Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, - Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, - Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - Convert_v1_ConfigMapList_To_api_ConfigMapList, - Convert_api_ConfigMapList_To_v1_ConfigMapList, - Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, - Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, - Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - Convert_v1_Container_To_api_Container, - Convert_api_Container_To_v1_Container, - Convert_v1_ContainerImage_To_api_ContainerImage, - Convert_api_ContainerImage_To_v1_ContainerImage, - Convert_v1_ContainerPort_To_api_ContainerPort, - Convert_api_ContainerPort_To_v1_ContainerPort, - Convert_v1_ContainerState_To_api_ContainerState, - 
Convert_api_ContainerState_To_v1_ContainerState, - Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, - Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, - Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, - Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, - Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, - Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, - Convert_v1_ContainerStatus_To_api_ContainerStatus, - Convert_api_ContainerStatus_To_v1_ContainerStatus, - Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, - Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, - Convert_v1_DeleteOptions_To_api_DeleteOptions, - Convert_api_DeleteOptions_To_v1_DeleteOptions, - Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, - Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, - Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - Convert_v1_EndpointAddress_To_api_EndpointAddress, - Convert_api_EndpointAddress_To_v1_EndpointAddress, - Convert_v1_EndpointPort_To_api_EndpointPort, - Convert_api_EndpointPort_To_v1_EndpointPort, - Convert_v1_EndpointSubset_To_api_EndpointSubset, - Convert_api_EndpointSubset_To_v1_EndpointSubset, - Convert_v1_Endpoints_To_api_Endpoints, - Convert_api_Endpoints_To_v1_Endpoints, - Convert_v1_EndpointsList_To_api_EndpointsList, - Convert_api_EndpointsList_To_v1_EndpointsList, - Convert_v1_EnvFromSource_To_api_EnvFromSource, - Convert_api_EnvFromSource_To_v1_EnvFromSource, - Convert_v1_EnvVar_To_api_EnvVar, - Convert_api_EnvVar_To_v1_EnvVar, - Convert_v1_EnvVarSource_To_api_EnvVarSource, - Convert_api_EnvVarSource_To_v1_EnvVarSource, - Convert_v1_Event_To_api_Event, - Convert_api_Event_To_v1_Event, - Convert_v1_EventList_To_api_EventList, - Convert_api_EventList_To_v1_EventList, - Convert_v1_EventSource_To_api_EventSource, - Convert_api_EventSource_To_v1_EventSource, - Convert_v1_ExecAction_To_api_ExecAction, - Convert_api_ExecAction_To_v1_ExecAction, - Convert_v1_FCVolumeSource_To_api_FCVolumeSource, - Convert_api_FCVolumeSource_To_v1_FCVolumeSource, - Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - Convert_v1_HTTPGetAction_To_api_HTTPGetAction, - Convert_api_HTTPGetAction_To_v1_HTTPGetAction, - Convert_v1_HTTPHeader_To_api_HTTPHeader, - Convert_api_HTTPHeader_To_v1_HTTPHeader, - Convert_v1_Handler_To_api_Handler, - Convert_api_Handler_To_v1_Handler, - Convert_v1_HostAlias_To_api_HostAlias, - Convert_api_HostAlias_To_v1_HostAlias, - Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - 
Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - Convert_v1_KeyToPath_To_api_KeyToPath, - Convert_api_KeyToPath_To_v1_KeyToPath, - Convert_v1_Lifecycle_To_api_Lifecycle, - Convert_api_Lifecycle_To_v1_Lifecycle, - Convert_v1_LimitRange_To_api_LimitRange, - Convert_api_LimitRange_To_v1_LimitRange, - Convert_v1_LimitRangeItem_To_api_LimitRangeItem, - Convert_api_LimitRangeItem_To_v1_LimitRangeItem, - Convert_v1_LimitRangeList_To_api_LimitRangeList, - Convert_api_LimitRangeList_To_v1_LimitRangeList, - Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, - Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, - Convert_v1_List_To_api_List, - Convert_api_List_To_v1_List, - Convert_v1_ListOptions_To_api_ListOptions, - Convert_api_ListOptions_To_v1_ListOptions, - Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - Convert_v1_LocalObjectReference_To_api_LocalObjectReference, - Convert_api_LocalObjectReference_To_v1_LocalObjectReference, - Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource, - Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource, - Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - Convert_v1_Namespace_To_api_Namespace, - Convert_api_Namespace_To_v1_Namespace, - Convert_v1_NamespaceList_To_api_NamespaceList, - Convert_api_NamespaceList_To_v1_NamespaceList, - Convert_v1_NamespaceSpec_To_api_NamespaceSpec, - Convert_api_NamespaceSpec_To_v1_NamespaceSpec, - Convert_v1_NamespaceStatus_To_api_NamespaceStatus, - Convert_api_NamespaceStatus_To_v1_NamespaceStatus, - Convert_v1_Node_To_api_Node, - Convert_api_Node_To_v1_Node, - Convert_v1_NodeAddress_To_api_NodeAddress, - Convert_api_NodeAddress_To_v1_NodeAddress, - Convert_v1_NodeAffinity_To_api_NodeAffinity, - Convert_api_NodeAffinity_To_v1_NodeAffinity, - Convert_v1_NodeCondition_To_api_NodeCondition, - Convert_api_NodeCondition_To_v1_NodeCondition, - Convert_v1_NodeConfigSource_To_api_NodeConfigSource, - Convert_api_NodeConfigSource_To_v1_NodeConfigSource, - Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, - Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, - Convert_v1_NodeList_To_api_NodeList, - Convert_api_NodeList_To_v1_NodeList, - Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, - Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, - Convert_v1_NodeResources_To_api_NodeResources, - Convert_api_NodeResources_To_v1_NodeResources, - Convert_v1_NodeSelector_To_api_NodeSelector, - Convert_api_NodeSelector_To_v1_NodeSelector, - Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, - Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, - Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, - Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, - Convert_v1_NodeSpec_To_api_NodeSpec, - Convert_api_NodeSpec_To_v1_NodeSpec, - Convert_v1_NodeStatus_To_api_NodeStatus, - Convert_api_NodeStatus_To_v1_NodeStatus, - Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, - Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, - Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - Convert_v1_ObjectMeta_To_api_ObjectMeta, - Convert_api_ObjectMeta_To_v1_ObjectMeta, - 
Convert_v1_ObjectReference_To_api_ObjectReference, - Convert_api_ObjectReference_To_v1_ObjectReference, - Convert_v1_PersistentVolume_To_api_PersistentVolume, - Convert_api_PersistentVolume_To_v1_PersistentVolume, - Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, - Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, - Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition, - Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition, - Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, - Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, - Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, - Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, - Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, - Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, - Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, - Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, - Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, - Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, - Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, - Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, - Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, - Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, - Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, - Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, - Convert_v1_Pod_To_api_Pod, - Convert_api_Pod_To_v1_Pod, - Convert_v1_PodAffinity_To_api_PodAffinity, - Convert_api_PodAffinity_To_v1_PodAffinity, - Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, - Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, - Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, - Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, - Convert_v1_PodAttachOptions_To_api_PodAttachOptions, - Convert_api_PodAttachOptions_To_v1_PodAttachOptions, - Convert_v1_PodCondition_To_api_PodCondition, - Convert_api_PodCondition_To_v1_PodCondition, - Convert_v1_PodExecOptions_To_api_PodExecOptions, - Convert_api_PodExecOptions_To_v1_PodExecOptions, - Convert_v1_PodList_To_api_PodList, - Convert_api_PodList_To_v1_PodList, - Convert_v1_PodLogOptions_To_api_PodLogOptions, - Convert_api_PodLogOptions_To_v1_PodLogOptions, - Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, - Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, - Convert_v1_PodProxyOptions_To_api_PodProxyOptions, - Convert_api_PodProxyOptions_To_v1_PodProxyOptions, - Convert_v1_PodSecurityContext_To_api_PodSecurityContext, - Convert_api_PodSecurityContext_To_v1_PodSecurityContext, - Convert_v1_PodSignature_To_api_PodSignature, - Convert_api_PodSignature_To_v1_PodSignature, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodStatus_To_api_PodStatus, - Convert_api_PodStatus_To_v1_PodStatus, - Convert_v1_PodStatusResult_To_api_PodStatusResult, - Convert_api_PodStatusResult_To_v1_PodStatusResult, - Convert_v1_PodTemplate_To_api_PodTemplate, - Convert_api_PodTemplate_To_v1_PodTemplate, - Convert_v1_PodTemplateList_To_api_PodTemplateList, - 
Convert_api_PodTemplateList_To_v1_PodTemplateList, - Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, - Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, - Convert_v1_Preconditions_To_api_Preconditions, - Convert_api_Preconditions_To_v1_Preconditions, - Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, - Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, - Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, - Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, - Convert_v1_Probe_To_api_Probe, - Convert_api_Probe_To_v1_Probe, - Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, - Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, - Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, - Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, - Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource, - Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource, - Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - Convert_v1_RangeAllocation_To_api_RangeAllocation, - Convert_api_RangeAllocation_To_v1_RangeAllocation, - Convert_v1_ReplicationController_To_api_ReplicationController, - Convert_api_ReplicationController_To_v1_ReplicationController, - Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, - Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, - Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, - Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, - Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, - Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, - Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, - Convert_v1_ResourceQuota_To_api_ResourceQuota, - Convert_api_ResourceQuota_To_v1_ResourceQuota, - Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, - Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, - Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, - Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, - Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, - Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, - Convert_v1_ResourceRequirements_To_api_ResourceRequirements, - Convert_api_ResourceRequirements_To_v1_ResourceRequirements, - Convert_v1_SELinuxOptions_To_api_SELinuxOptions, - Convert_api_SELinuxOptions_To_v1_SELinuxOptions, - Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource, - Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource, - Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, - Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, - Convert_v1_Secret_To_api_Secret, - Convert_api_Secret_To_v1_Secret, - Convert_v1_SecretEnvSource_To_api_SecretEnvSource, - Convert_api_SecretEnvSource_To_v1_SecretEnvSource, - Convert_v1_SecretKeySelector_To_api_SecretKeySelector, - Convert_api_SecretKeySelector_To_v1_SecretKeySelector, - Convert_v1_SecretList_To_api_SecretList, - Convert_api_SecretList_To_v1_SecretList, - 
Convert_v1_SecretProjection_To_api_SecretProjection, - Convert_api_SecretProjection_To_v1_SecretProjection, - Convert_v1_SecretReference_To_api_SecretReference, - Convert_api_SecretReference_To_v1_SecretReference, - Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - Convert_v1_SecurityContext_To_api_SecurityContext, - Convert_api_SecurityContext_To_v1_SecurityContext, - Convert_v1_SerializedReference_To_api_SerializedReference, - Convert_api_SerializedReference_To_v1_SerializedReference, - Convert_v1_Service_To_api_Service, - Convert_api_Service_To_v1_Service, - Convert_v1_ServiceAccount_To_api_ServiceAccount, - Convert_api_ServiceAccount_To_v1_ServiceAccount, - Convert_v1_ServiceAccountList_To_api_ServiceAccountList, - Convert_api_ServiceAccountList_To_v1_ServiceAccountList, - Convert_v1_ServiceList_To_api_ServiceList, - Convert_api_ServiceList_To_v1_ServiceList, - Convert_v1_ServicePort_To_api_ServicePort, - Convert_api_ServicePort_To_v1_ServicePort, - Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, - Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_ServiceStatus_To_api_ServiceStatus, - Convert_api_ServiceStatus_To_v1_ServiceStatus, - Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig, - Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig, - Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource, - Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, - Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource, - Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, - Convert_v1_Sysctl_To_api_Sysctl, - Convert_api_Sysctl_To_v1_Sysctl, - Convert_v1_TCPSocketAction_To_api_TCPSocketAction, - Convert_api_TCPSocketAction_To_v1_TCPSocketAction, - Convert_v1_Taint_To_api_Taint, - Convert_api_Taint_To_v1_Taint, - Convert_v1_Toleration_To_api_Toleration, - Convert_api_Toleration_To_v1_Toleration, - Convert_v1_Volume_To_api_Volume, - Convert_api_Volume_To_v1_Volume, - Convert_v1_VolumeMount_To_api_VolumeMount, - Convert_api_VolumeMount_To_v1_VolumeMount, - Convert_v1_VolumeProjection_To_api_VolumeProjection, - Convert_api_VolumeProjection_To_v1_VolumeProjection, - Convert_v1_VolumeSource_To_api_VolumeSource, - Convert_api_VolumeSource_To_v1_VolumeSource, - Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, - Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, - Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, - Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, - ) -} - -func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. 
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_v1_Affinity_To_api_Affinity(in *v1.Affinity, out *api.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -// Convert_v1_Affinity_To_api_Affinity is an autogenerated conversion function. -func Convert_v1_Affinity_To_api_Affinity(in *v1.Affinity, out *api.Affinity, s conversion.Scope) error { - return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) -} - -func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *v1.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -// Convert_api_Affinity_To_v1_Affinity is an autogenerated conversion function. -func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *v1.Affinity, s conversion.Scope) error { - return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) -} - -func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *v1.AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - out.Name = api.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -// Convert_v1_AttachedVolume_To_api_AttachedVolume is an autogenerated conversion function. -func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *v1.AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) -} - -func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { - out.Name = v1.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -// Convert_api_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function. 
-func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { - return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) -} - -func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *v1.AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -// Convert_v1_AvoidPods_To_api_AvoidPods is an autogenerated conversion function. -func Convert_v1_AvoidPods_To_api_AvoidPods(in *v1.AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) -} - -func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -// Convert_api_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function. -func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { - return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) -} - -func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - out.Kind = (*api.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) - return nil -} - -// Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource is an autogenerated conversion function. -func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) - return nil -} - -// Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function. -func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *api.AzureFilePersistentVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) - return nil -} - -// Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *api.AzureFilePersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *api.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) - return nil -} - -// Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function. -func Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *api.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource is an autogenerated conversion function. -func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. -func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_v1_Binding_To_api_Binding(in *v1.Binding, out *api.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Binding_To_api_Binding is an autogenerated conversion function. -func Convert_v1_Binding_To_api_Binding(in *v1.Binding, out *api.Binding, s conversion.Scope) error { - return autoConvert_v1_Binding_To_api_Binding(in, out, s) -} - -func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *v1.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -// Convert_api_Binding_To_v1_Binding is an autogenerated conversion function. 
-func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *v1.Binding, s conversion.Scope) error { - return autoConvert_api_Binding_To_v1_Binding(in, out, s) -} - -func autoConvert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error { - out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -// Convert_v1_Capabilities_To_api_Capabilities is an autogenerated conversion function. -func Convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error { - return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) -} - -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -// Convert_api_Capabilities_To_v1_Capabilities is an autogenerated conversion function. -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) -} - -func autoConvert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *api.CephFSPersistentVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *api.CephFSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *api.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *api.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource is an autogenerated conversion function. 
-func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) -} - -func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource is an autogenerated conversion function. -func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) -} - -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) -} - -func autoConvert_v1_ClientIPConfig_To_api_ClientIPConfig(in *v1.ClientIPConfig, out *api.ClientIPConfig, s conversion.Scope) error { - out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_v1_ClientIPConfig_To_api_ClientIPConfig is an autogenerated conversion function. -func Convert_v1_ClientIPConfig_To_api_ClientIPConfig(in *v1.ClientIPConfig, out *api.ClientIPConfig, s conversion.Scope) error { - return autoConvert_v1_ClientIPConfig_To_api_ClientIPConfig(in, out, s) -} - -func autoConvert_api_ClientIPConfig_To_v1_ClientIPConfig(in *api.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { - out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_api_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. 
-func Convert_api_ClientIPConfig_To_v1_ClientIPConfig(in *api.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { - return autoConvert_api_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) -} - -func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *v1.ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - out.Type = api.ComponentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -// Convert_v1_ComponentCondition_To_api_ComponentCondition is an autogenerated conversion function. -func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *v1.ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) -} - -func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - out.Type = v1.ComponentConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -// Convert_api_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. -func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) -} - -func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *v1.ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_ComponentStatus_To_api_ComponentStatus is an autogenerated conversion function. -func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *v1.ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) -} - -func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. -func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { - return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) -} - -func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *v1.ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ComponentStatusList_To_api_ComponentStatusList is an autogenerated conversion function. 
-func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *v1.ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) -} - -func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. -func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { - return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) -} - -func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *v1.ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1_ConfigMap_To_api_ConfigMap is an autogenerated conversion function. -func Convert_v1_ConfigMap_To_api_ConfigMap(in *v1.ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) -} - -func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_api_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. -func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { - return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) -} - -func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource is an autogenerated conversion function. -func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. 
-func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *v1.ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ConfigMapList_To_api_ConfigMapList is an autogenerated conversion function. -func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *v1.ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) -} - -func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. -func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { - return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) -} - -func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *v1.ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection is an autogenerated conversion function. 
-func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *v1.ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) -} - -func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. -func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) -} - -func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource is an autogenerated conversion function. -func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. 
-func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(api.SecurityContext) - if err := Convert_v1_SecurityContext_To_api_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -// Convert_v1_Container_To_api_Container is an autogenerated conversion function. -func Convert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error { - return autoConvert_v1_Container_To_api_Container(in, out, s) -} - -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -// Convert_api_Container_To_v1_Container is an autogenerated conversion function. 
-func Convert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) -} - -func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *v1.ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -// Convert_v1_ContainerImage_To_api_ContainerImage is an autogenerated conversion function. -func Convert_v1_ContainerImage_To_api_ContainerImage(in *v1.ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) -} - -func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -// Convert_api_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. -func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { - return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) -} - -func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = api.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -// Convert_v1_ContainerPort_To_api_ContainerPort is an autogenerated conversion function. -func Convert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) -} - -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = v1.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -// Convert_api_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) -} - -func autoConvert_v1_ContainerState_To_api_ContainerState(in *v1.ContainerState, out *api.ContainerState, s conversion.Scope) error { - out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -// Convert_v1_ContainerState_To_api_ContainerState is an autogenerated conversion function. 
-func Convert_v1_ContainerState_To_api_ContainerState(in *v1.ContainerState, out *api.ContainerState, s conversion.Scope) error { - return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) -} - -func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -// Convert_api_ContainerState_To_v1_ContainerState is an autogenerated conversion function. -func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) -} - -func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *v1.ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -// Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning is an autogenerated conversion function. -func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *v1.ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) -} - -func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -// Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. -func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) -} - -func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -// Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated is an autogenerated conversion function. -func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) -} - -func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -// Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. 
-func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) -} - -func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting is an autogenerated conversion function. -func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) -} - -func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. -func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) -} - -func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *v1.ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -// Convert_v1_ContainerStatus_To_api_ContainerStatus is an autogenerated conversion function. -func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *v1.ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) -} - -func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -// Convert_api_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. -func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { - return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) -} - -func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *v1.DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -// Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint is an autogenerated conversion function. 
-func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *v1.DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) -} - -func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -// Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. -func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) -} - -func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *v1.DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -// Convert_v1_DeleteOptions_To_api_DeleteOptions is an autogenerated conversion function. -func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *v1.DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) -} - -func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*v1.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*v1.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -// Convert_api_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. -func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { - return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) -} - -func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection is an autogenerated conversion function. -func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) -} - -func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. 
-func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. 
-func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = api.StorageMedium(in.Medium) - out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) - return nil -} - -// Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = v1.StorageMedium(in.Medium) - out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) - return nil -} - -// Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *v1.EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -// Convert_v1_EndpointAddress_To_api_EndpointAddress is an autogenerated conversion function. -func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *v1.EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) -} - -func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -// Convert_api_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. -func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { - return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) -} - -func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *v1.EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = api.Protocol(in.Protocol) - return nil -} - -// Convert_v1_EndpointPort_To_api_EndpointPort is an autogenerated conversion function. 
-func Convert_v1_EndpointPort_To_api_EndpointPort(in *v1.EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) -} - -func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = v1.Protocol(in.Protocol) - return nil -} - -// Convert_api_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. -func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { - return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) -} - -func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *v1.EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_v1_EndpointSubset_To_api_EndpointSubset is an autogenerated conversion function. -func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *v1.EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) -} - -func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_api_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. -func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) -} - -func autoConvert_v1_Endpoints_To_api_Endpoints(in *v1.Endpoints, out *api.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -// Convert_v1_Endpoints_To_api_Endpoints is an autogenerated conversion function. -func Convert_v1_Endpoints_To_api_Endpoints(in *v1.Endpoints, out *api.Endpoints, s conversion.Scope) error { - return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) -} - -func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *v1.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -// Convert_api_Endpoints_To_v1_Endpoints is an autogenerated conversion function. -func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *v1.Endpoints, s conversion.Scope) error { - return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) -} - -func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *v1.EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_EndpointsList_To_api_EndpointsList is an autogenerated conversion function. 
-func Convert_v1_EndpointsList_To_api_EndpointsList(in *v1.EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) -} - -func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. -func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { - return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) -} - -func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *v1.EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_EnvFromSource_To_api_EnvFromSource is an autogenerated conversion function. -func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *v1.EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) -} - -func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. -func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { - return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) -} - -func autoConvert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -// Convert_v1_EnvVar_To_api_EnvVar is an autogenerated conversion function. -func Convert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error { - return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) -} - -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -// Convert_api_EnvVar_To_v1_EnvVar is an autogenerated conversion function. -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) -} - -func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -// Convert_v1_EnvVarSource_To_api_EnvVarSource is an autogenerated conversion function. 
-func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) -} - -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -// Convert_api_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) -} - -func autoConvert_v1_Event_To_api_Event(in *v1.Event, out *api.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -// Convert_v1_Event_To_api_Event is an autogenerated conversion function. -func Convert_v1_Event_To_api_Event(in *v1.Event, out *api.Event, s conversion.Scope) error { - return autoConvert_v1_Event_To_api_Event(in, out, s) -} - -func autoConvert_api_Event_To_v1_Event(in *api.Event, out *v1.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -// Convert_api_Event_To_v1_Event is an autogenerated conversion function. -func Convert_api_Event_To_v1_Event(in *api.Event, out *v1.Event, s conversion.Scope) error { - return autoConvert_api_Event_To_v1_Event(in, out, s) -} - -func autoConvert_v1_EventList_To_api_EventList(in *v1.EventList, out *api.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_EventList_To_api_EventList is an autogenerated conversion function. -func Convert_v1_EventList_To_api_EventList(in *v1.EventList, out *api.EventList, s conversion.Scope) error { - return autoConvert_v1_EventList_To_api_EventList(in, out, s) -} - -func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *v1.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_EventList_To_v1_EventList is an autogenerated conversion function. 
-func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *v1.EventList, s conversion.Scope) error { - return autoConvert_api_EventList_To_v1_EventList(in, out, s) -} - -func autoConvert_v1_EventSource_To_api_EventSource(in *v1.EventSource, out *api.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -// Convert_v1_EventSource_To_api_EventSource is an autogenerated conversion function. -func Convert_v1_EventSource_To_api_EventSource(in *v1.EventSource, out *api.EventSource, s conversion.Scope) error { - return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) -} - -func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *v1.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -// Convert_api_EventSource_To_v1_EventSource is an autogenerated conversion function. -func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *v1.EventSource, s conversion.Scope) error { - return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) -} - -func autoConvert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_v1_ExecAction_To_api_ExecAction is an autogenerated conversion function. -func Convert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error { - return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) -} - -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_api_ExecAction_To_v1_ExecAction is an autogenerated conversion function. -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) -} - -func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) - return nil -} - -// Convert_v1_FCVolumeSource_To_api_FCVolumeSource is an autogenerated conversion function. -func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) -} - -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) - return nil -} - -// Convert_api_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. 
-func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) -} - -func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -// Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource is an autogenerated conversion function. -func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) -} - -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -// Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) -} - -func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -// Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource is an autogenerated conversion function. -func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) -} - -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -// Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) -} - -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -// Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -// Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource is an autogenerated conversion function. -func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. 
-func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = api.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -// Convert_v1_HTTPGetAction_To_api_HTTPGetAction is an autogenerated conversion function. -func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = v1.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -// Convert_api_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_HTTPHeader_To_api_HTTPHeader is an autogenerated conversion function. -func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) -} - -func autoConvert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error { - out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -// Convert_v1_Handler_To_api_Handler is an autogenerated conversion function. -func Convert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error { - return autoConvert_v1_Handler_To_api_Handler(in, out, s) -} - -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -// Convert_api_Handler_To_v1_Handler is an autogenerated conversion function. 
-func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) -} - -func autoConvert_v1_HostAlias_To_api_HostAlias(in *v1.HostAlias, out *api.HostAlias, s conversion.Scope) error { - out.IP = in.IP - out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) - return nil -} - -// Convert_v1_HostAlias_To_api_HostAlias is an autogenerated conversion function. -func Convert_v1_HostAlias_To_api_HostAlias(in *v1.HostAlias, out *api.HostAlias, s conversion.Scope) error { - return autoConvert_v1_HostAlias_To_api_HostAlias(in, out, s) -} - -func autoConvert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *v1.HostAlias, s conversion.Scope) error { - out.IP = in.IP - out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) - return nil -} - -// Convert_api_HostAlias_To_v1_HostAlias is an autogenerated conversion function. -func Convert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *v1.HostAlias, s conversion.Scope) error { - return autoConvert_api_HostAlias_To_v1_HostAlias(in, out, s) -} - -func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - out.Type = (*api.HostPathType)(unsafe.Pointer(in.Type)) - return nil -} - -// Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource is an autogenerated conversion function. -func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) -} - -func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) - return nil -} - -// Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth - out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) - return nil -} - -// Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth - out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) - return nil -} - -// Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_v1_KeyToPath_To_api_KeyToPath is an autogenerated conversion function. -func Convert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) -} - -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_api_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -// Convert_v1_Lifecycle_To_api_Lifecycle is an autogenerated conversion function. -func Convert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -// Convert_api_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_v1_LimitRange_To_api_LimitRange(in *v1.LimitRange, out *api.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1_LimitRange_To_api_LimitRange is an autogenerated conversion function. 
-func Convert_v1_LimitRange_To_api_LimitRange(in *v1.LimitRange, out *api.LimitRange, s conversion.Scope) error { - return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) -} - -func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *v1.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_api_LimitRange_To_v1_LimitRange is an autogenerated conversion function. -func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *v1.LimitRange, s conversion.Scope) error { - return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) -} - -func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *v1.LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - out.Type = api.LimitType(in.Type) - out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -// Convert_v1_LimitRangeItem_To_api_LimitRangeItem is an autogenerated conversion function. -func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *v1.LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) -} - -func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - out.Type = v1.LimitType(in.Type) - out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -// Convert_api_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. -func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) -} - -func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *v1.LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_LimitRangeList_To_api_LimitRangeList is an autogenerated conversion function. -func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *v1.LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) -} - -func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. 
-func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { - return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) -} - -func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *v1.LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -// Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec is an autogenerated conversion function. -func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *v1.LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) -} - -func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -// Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. -func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) -} - -func autoConvert_v1_List_To_api_List(in *v1.List, out *api.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_List_To_api_List is an autogenerated conversion function. -func Convert_v1_List_To_api_List(in *v1.List, out *api.List, s conversion.Scope) error { - return autoConvert_v1_List_To_api_List(in, out, s) -} - -func autoConvert_api_List_To_v1_List(in *api.List, out *v1.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_List_To_v1_List is an autogenerated conversion function. -func Convert_api_List_To_v1_List(in *api.List, out *v1.List, s conversion.Scope) error { - return autoConvert_api_List_To_v1_List(in, out, s) -} - -func autoConvert_v1_ListOptions_To_api_ListOptions(in *v1.ListOptions, out *api.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_v1_ListOptions_To_api_ListOptions is an autogenerated conversion function. 
-func Convert_v1_ListOptions_To_api_ListOptions(in *v1.ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) -} - -func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *v1.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_api_ListOptions_To_v1_ListOptions is an autogenerated conversion function. -func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *v1.ListOptions, s conversion.Scope) error { - return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) -} - -func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -// Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress is an autogenerated conversion function. -func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) -} - -func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -// Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. -func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) -} - -func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -// Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus is an autogenerated conversion function. -func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) -} - -func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -// Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. 
-func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - -func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -// Convert_v1_LocalObjectReference_To_api_LocalObjectReference is an autogenerated conversion function. -func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -// Convert_api_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) -} - -func autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *v1.LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource is an autogenerated conversion function. -func Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *v1.LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { - return autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in, out, s) -} - -func autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. -func Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { - return autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) -} - -func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource is an autogenerated conversion function. -func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. 
-func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_v1_Namespace_To_api_Namespace(in *v1.Namespace, out *api.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Namespace_To_api_Namespace is an autogenerated conversion function. -func Convert_v1_Namespace_To_api_Namespace(in *v1.Namespace, out *api.Namespace, s conversion.Scope) error { - return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) -} - -func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *v1.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Namespace_To_v1_Namespace is an autogenerated conversion function. -func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *v1.Namespace, s conversion.Scope) error { - return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) -} - -func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *v1.NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NamespaceList_To_api_NamespaceList is an autogenerated conversion function. -func Convert_v1_NamespaceList_To_api_NamespaceList(in *v1.NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) -} - -func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. -func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { - return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) -} - -func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *v1.NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -// Convert_v1_NamespaceSpec_To_api_NamespaceSpec is an autogenerated conversion function. -func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *v1.NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) -} - -func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -// Convert_api_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. 
-func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) -} - -func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *v1.NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - out.Phase = api.NamespacePhase(in.Phase) - return nil -} - -// Convert_v1_NamespaceStatus_To_api_NamespaceStatus is an autogenerated conversion function. -func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *v1.NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) -} - -func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - out.Phase = v1.NamespacePhase(in.Phase) - return nil -} - -// Convert_api_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. -func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) -} - -func autoConvert_v1_Node_To_api_Node(in *v1.Node, out *api.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Node_To_api_Node is an autogenerated conversion function. -func Convert_v1_Node_To_api_Node(in *v1.Node, out *api.Node, s conversion.Scope) error { - return autoConvert_v1_Node_To_api_Node(in, out, s) -} - -func autoConvert_api_Node_To_v1_Node(in *api.Node, out *v1.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Node_To_v1_Node is an autogenerated conversion function. -func Convert_api_Node_To_v1_Node(in *api.Node, out *v1.Node, s conversion.Scope) error { - return autoConvert_api_Node_To_v1_Node(in, out, s) -} - -func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *v1.NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - out.Type = api.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -// Convert_v1_NodeAddress_To_api_NodeAddress is an autogenerated conversion function. -func Convert_v1_NodeAddress_To_api_NodeAddress(in *v1.NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) -} - -func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - out.Type = v1.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -// Convert_api_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. 
-func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) -} - -func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *v1.NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_NodeAffinity_To_api_NodeAffinity is an autogenerated conversion function. -func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *v1.NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) -} - -func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. -func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) -} - -func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *v1.NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - out.Type = api.NodeConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_NodeCondition_To_api_NodeCondition is an autogenerated conversion function. -func Convert_v1_NodeCondition_To_api_NodeCondition(in *v1.NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) -} - -func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - out.Type = v1.NodeConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. -func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) -} - -func autoConvert_v1_NodeConfigSource_To_api_NodeConfigSource(in *v1.NodeConfigSource, out *api.NodeConfigSource, s conversion.Scope) error { - out.ConfigMapRef = (*api.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) - return nil -} - -// Convert_v1_NodeConfigSource_To_api_NodeConfigSource is an autogenerated conversion function. 
-func Convert_v1_NodeConfigSource_To_api_NodeConfigSource(in *v1.NodeConfigSource, out *api.NodeConfigSource, s conversion.Scope) error { - return autoConvert_v1_NodeConfigSource_To_api_NodeConfigSource(in, out, s) -} - -func autoConvert_api_NodeConfigSource_To_v1_NodeConfigSource(in *api.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) - return nil -} - -// Convert_api_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. -func Convert_api_NodeConfigSource_To_v1_NodeConfigSource(in *api.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - return autoConvert_api_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) -} - -func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_v1_NodeList_To_api_NodeList(in *v1.NodeList, out *api.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NodeList_To_api_NodeList is an autogenerated conversion function. -func Convert_v1_NodeList_To_api_NodeList(in *v1.NodeList, out *api.NodeList, s conversion.Scope) error { - return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) -} - -func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *v1.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_NodeList_To_v1_NodeList is an autogenerated conversion function. -func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *v1.NodeList, s conversion.Scope) error { - return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) -} - -func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *v1.NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions is an autogenerated conversion function. 
-func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *v1.NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) -} - -func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. -func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) -} - -func autoConvert_v1_NodeResources_To_api_NodeResources(in *v1.NodeResources, out *api.NodeResources, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -// Convert_v1_NodeResources_To_api_NodeResources is an autogenerated conversion function. -func Convert_v1_NodeResources_To_api_NodeResources(in *v1.NodeResources, out *api.NodeResources, s conversion.Scope) error { - return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) -} - -func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *v1.NodeResources, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -// Convert_api_NodeResources_To_v1_NodeResources is an autogenerated conversion function. -func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *v1.NodeResources, s conversion.Scope) error { - return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) -} - -func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *v1.NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -// Convert_v1_NodeSelector_To_api_NodeSelector is an autogenerated conversion function. -func Convert_v1_NodeSelector_To_api_NodeSelector(in *v1.NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) -} - -func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -// Convert_api_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. -func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) -} - -func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -// Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement is an autogenerated conversion function. 
-func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = v1.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -// Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. -func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -// Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm is an autogenerated conversion function. -func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) -} - -func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -// Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. -func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) -} - -func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *v1.NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*api.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - return nil -} - -// Convert_v1_NodeSpec_To_api_NodeSpec is an autogenerated conversion function. -func Convert_v1_NodeSpec_To_api_NodeSpec(in *v1.NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) -} - -func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - return nil -} - -// Convert_api_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. 
-func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { - return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) -} - -func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *v1.NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = api.NodePhase(in.Phase) - out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -// Convert_v1_NodeStatus_To_api_NodeStatus is an autogenerated conversion function. -func Convert_v1_NodeStatus_To_api_NodeStatus(in *v1.NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) -} - -func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = v1.NodePhase(in.Phase) - out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -// Convert_api_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. -func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) -} - -func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *v1.NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -// Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo is an autogenerated conversion function. 
-func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *v1.NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) -} - -func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -// Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. -func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) -} - -func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector is an autogenerated conversion function. -func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -// Convert_v1_ObjectMeta_To_api_ObjectMeta is an autogenerated conversion function. 
-func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -// Convert_api_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *v1.ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_v1_ObjectReference_To_api_ObjectReference is an autogenerated conversion function. -func Convert_v1_ObjectReference_To_api_ObjectReference(in *v1.ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) -} - -func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_api_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. -func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { - return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) -} - -func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *v1.PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PersistentVolume_To_api_PersistentVolume is an autogenerated conversion function. 
-func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *v1.PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) -} - -func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. -func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { - return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *api.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = api.PersistentVolumeClaimConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *api.PersistentVolumeClaimCondition, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *api.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = v1.PersistentVolumeClaimConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *api.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -// Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -// Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]api.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *v1.PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *v1.PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) -} - -func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. 
-func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*api.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.AzureFile = (*api.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.Local = (*api.LocalVolumeSource)(unsafe.Pointer(in.Local)) - out.StorageOS = (*api.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) - out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. -func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) - return nil -} - -// Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) - return nil -} - -// Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. -func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -// Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus is an autogenerated conversion function. -func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -// Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. -func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -// Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -// Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_Pod_To_api_Pod(in *v1.Pod, out *api.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Pod_To_api_Pod is an autogenerated conversion function. -func Convert_v1_Pod_To_api_Pod(in *v1.Pod, out *api.Pod, s conversion.Scope) error { - return autoConvert_v1_Pod_To_api_Pod(in, out, s) -} - -func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *v1.PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_PodAffinity_To_api_PodAffinity is an autogenerated conversion function. -func Convert_v1_PodAffinity_To_api_PodAffinity(in *v1.PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) -} - -func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. 
-func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) -} - -func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *v1.PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -// Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm is an autogenerated conversion function. -func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *v1.PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) -} - -func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -// Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. -func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) -} - -func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *v1.PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity is an autogenerated conversion function. -func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *v1.PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) -} - -func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. -func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) -} - -func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *v1.PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -// Convert_v1_PodAttachOptions_To_api_PodAttachOptions is an autogenerated conversion function. 
-func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *v1.PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) -} - -func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -// Convert_api_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. -func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { - return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) -} - -func autoConvert_v1_PodCondition_To_api_PodCondition(in *v1.PodCondition, out *api.PodCondition, s conversion.Scope) error { - out.Type = api.PodConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PodCondition_To_api_PodCondition is an autogenerated conversion function. -func Convert_v1_PodCondition_To_api_PodCondition(in *v1.PodCondition, out *api.PodCondition, s conversion.Scope) error { - return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) -} - -func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - out.Type = v1.PodConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PodCondition_To_v1_PodCondition is an autogenerated conversion function. -func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *v1.PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_v1_PodExecOptions_To_api_PodExecOptions is an autogenerated conversion function. -func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *v1.PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) -} - -func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_api_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. 
-func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { - return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) -} - -func autoConvert_v1_PodList_To_api_PodList(in *v1.PodList, out *api.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Pod, len(*in)) - for i := range *in { - if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodList_To_api_PodList is an autogenerated conversion function. -func Convert_v1_PodList_To_api_PodList(in *v1.PodList, out *api.PodList, s conversion.Scope) error { - return autoConvert_v1_PodList_To_api_PodList(in, out, s) -} - -func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *v1.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Pod, len(*in)) - for i := range *in { - if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PodList_To_v1_PodList is an autogenerated conversion function. -func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *v1.PodList, s conversion.Scope) error { - return autoConvert_api_PodList_To_v1_PodList(in, out, s) -} - -func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *v1.PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -// Convert_v1_PodLogOptions_To_api_PodLogOptions is an autogenerated conversion function. -func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *v1.PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) -} - -func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -// Convert_api_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. -func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { - return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) -} - -func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions is an autogenerated conversion function. 
-func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) -} - -func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. -func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) -} - -func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *v1.PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_PodProxyOptions_To_api_PodProxyOptions is an autogenerated conversion function. -func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *v1.PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) -} - -func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. -func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { - return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) -} - -func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_v1_PodSignature_To_api_PodSignature(in *v1.PodSignature, out *api.PodSignature, s conversion.Scope) error { - out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) - return nil -} - -// Convert_v1_PodSignature_To_api_PodSignature is an autogenerated conversion function. 
-func Convert_v1_PodSignature_To_api_PodSignature(in *v1.PodSignature, out *api.PodSignature, s conversion.Scope) error { - return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s) -} - -func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *v1.PodSignature, s conversion.Scope) error { - out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) - return nil -} - -// Convert_api_PodSignature_To_v1_PodSignature is an autogenerated conversion function. -func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *v1.PodSignature, s conversion.Scope) error { - return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s) -} - -func autoConvert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error { - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]api.Volume, len(*in)) - for i := range *in { - if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]api.Container, len(*in)) - for i := range *in { - if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]api.Container, len(*in)) - for i := range *in { - if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = api.RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.DNSPolicy = api.DNSPolicy(in.DNSPolicy) - out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) - out.ServiceAccountName = in.ServiceAccountName - // INFO: in.DeprecatedServiceAccount opted out of conversion generation - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - out.NodeName = in.NodeName - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(api.PodSecurityContext) - if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity)) - out.SchedulerName = in.SchedulerName - out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations)) - out.HostAliases = *(*[]api.HostAlias)(unsafe.Pointer(&in.HostAliases)) - out.PriorityClassName = in.PriorityClassName - out.Priority = (*int32)(unsafe.Pointer(in.Priority)) - return nil -} - -func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
- return err - } - } - } else { - out.Volumes = nil - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]v1.Container, len(*in)) - for i := range *in { - if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.InitContainers = nil - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]v1.Container, len(*in)) - for i := range *in { - if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Containers = nil - } - out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) - out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) - out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) - out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) - out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) - out.ServiceAccountName = in.ServiceAccountName - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - out.NodeName = in.NodeName - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.PodSecurityContext) - if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.Hostname = in.Hostname - out.Subdomain = in.Subdomain - out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity)) - out.SchedulerName = in.SchedulerName - out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations)) - out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases)) - out.PriorityClassName = in.PriorityClassName - out.Priority = (*int32)(unsafe.Pointer(in.Priority)) - return nil -} - -func autoConvert_v1_PodStatus_To_api_PodStatus(in *v1.PodStatus, out *api.PodStatus, s conversion.Scope) error { - out.Phase = api.PodPhase(in.Phase) - out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - out.QOSClass = api.PodQOSClass(in.QOSClass) - return nil -} - -// Convert_v1_PodStatus_To_api_PodStatus is an autogenerated conversion function.
-func Convert_v1_PodStatus_To_api_PodStatus(in *v1.PodStatus, out *api.PodStatus, s conversion.Scope) error { - return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) -} - -func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - out.Phase = v1.PodPhase(in.Phase) - out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.QOSClass = v1.PodQOSClass(in.QOSClass) - out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - return nil -} - -// Convert_api_PodStatus_To_v1_PodStatus is an autogenerated conversion function. -func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) -} - -func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *v1.PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PodStatusResult_To_api_PodStatusResult is an autogenerated conversion function. -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *v1.PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s) -} - -func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. -func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s) -} - -func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *v1.PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PodTemplate_To_api_PodTemplate is an autogenerated conversion function. -func Convert_v1_PodTemplate_To_api_PodTemplate(in *v1.PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) -} - -func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -// Convert_api_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. 
-func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { - return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) -} - -func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *v1.PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodTemplateList_To_api_PodTemplateList is an autogenerated conversion function. -func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *v1.PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) -} - -func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. -func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { - return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) -} - -func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource is an autogenerated conversion function. -func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) -} - -func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. 
-func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_Preconditions_To_api_Preconditions(in *v1.Preconditions, out *api.Preconditions, s conversion.Scope) error {
-	out.UID = (*types.UID)(unsafe.Pointer(in.UID))
-	return nil
-}
-
-// Convert_v1_Preconditions_To_api_Preconditions is an autogenerated conversion function.
-func Convert_v1_Preconditions_To_api_Preconditions(in *v1.Preconditions, out *api.Preconditions, s conversion.Scope) error {
-	return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s)
-}
-
-func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *v1.Preconditions, s conversion.Scope) error {
-	out.UID = (*types.UID)(unsafe.Pointer(in.UID))
-	return nil
-}
-
-// Convert_api_Preconditions_To_v1_Preconditions is an autogenerated conversion function.
-func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *v1.Preconditions, s conversion.Scope) error {
-	return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s)
-}
-
-func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error {
-	if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil {
-		return err
-	}
-	out.EvictionTime = in.EvictionTime
-	out.Reason = in.Reason
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry is an autogenerated conversion function.
-func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error {
-	return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s)
-}
-
-func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error {
-	if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil {
-		return err
-	}
-	out.EvictionTime = in.EvictionTime
-	out.Reason = in.Reason
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function.
-func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error {
-	return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s)
-}
-
-func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error {
-	out.Weight = in.Weight
-	if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm is an autogenerated conversion function.
-func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error {
-	return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s)
-}
-
-func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error {
-	out.Weight = in.Weight
-	if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function.
-func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error {
-	return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s)
-}
-
-func autoConvert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error {
-	if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil {
-		return err
-	}
-	out.InitialDelaySeconds = in.InitialDelaySeconds
-	out.TimeoutSeconds = in.TimeoutSeconds
-	out.PeriodSeconds = in.PeriodSeconds
-	out.SuccessThreshold = in.SuccessThreshold
-	out.FailureThreshold = in.FailureThreshold
-	return nil
-}
-
-// Convert_v1_Probe_To_api_Probe is an autogenerated conversion function.
-func Convert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error {
-	return autoConvert_v1_Probe_To_api_Probe(in, out, s)
-}
-
-func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error {
-	if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil {
-		return err
-	}
-	out.InitialDelaySeconds = in.InitialDelaySeconds
-	out.TimeoutSeconds = in.TimeoutSeconds
-	out.PeriodSeconds = in.PeriodSeconds
-	out.SuccessThreshold = in.SuccessThreshold
-	out.FailureThreshold = in.FailureThreshold
-	return nil
-}
-
-// Convert_api_Probe_To_v1_Probe is an autogenerated conversion function.
-func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error {
-	return autoConvert_api_Probe_To_v1_Probe(in, out, s)
-}
-
-func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error {
-	out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources))
-	out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
-	return nil
-}
-
-// Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource is an autogenerated conversion function.
-func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s)
-}
-
-func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error {
-	out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources))
-	out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode))
-	return nil
-}
-
-// Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function.
-func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error {
-	out.Registry = in.Registry
-	out.Volume = in.Volume
-	out.ReadOnly = in.ReadOnly
-	out.User = in.User
-	out.Group = in.Group
-	return nil
-}
-
-// Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource is an autogenerated conversion function.
-func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s)
-}
-
-func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error {
-	out.Registry = in.Registry
-	out.Volume = in.Volume
-	out.ReadOnly = in.ReadOnly
-	out.User = in.User
-	out.Group = in.Group
-	return nil
-}
-
-// Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function.
-func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *api.RBDPersistentVolumeSource, s conversion.Scope) error {
-	out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef))
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-// Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource is an autogenerated conversion function.
-func Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *api.RBDPersistentVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in, out, s)
-}
-
-func autoConvert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *api.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error {
-	out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef))
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-// Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function.
-func Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *api.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
-	out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-// Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource is an autogenerated conversion function.
-func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error {
-	out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors))
-	out.RBDImage = in.RBDImage
-	out.FSType = in.FSType
-	out.RBDPool = in.RBDPool
-	out.RadosUser = in.RadosUser
-	out.Keyring = in.Keyring
-	out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef))
-	out.ReadOnly = in.ReadOnly
-	return nil
-}
-
-// Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function.
-func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error {
-	return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s)
-}
-
-func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *v1.RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	out.Range = in.Range
-	out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
-	return nil
-}
-
-// Convert_v1_RangeAllocation_To_api_RangeAllocation is an autogenerated conversion function.
-func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *v1.RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error {
-	return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s)
-}
-
-func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	out.Range = in.Range
-	out.Data = *(*[]byte)(unsafe.Pointer(&in.Data))
-	return nil
-}
-
-// Convert_api_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function.
-func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error {
-	return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s)
-}
-
-func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *v1.ReplicationController, out *api.ReplicationController, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1_ReplicationController_To_api_ReplicationController is an autogenerated conversion function.
-func Convert_v1_ReplicationController_To_api_ReplicationController(in *v1.ReplicationController, out *api.ReplicationController, s conversion.Scope) error {
-	return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s)
-}
-
-func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error {
-	out.ObjectMeta = in.ObjectMeta
-	if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil {
-		return err
-	}
-	if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_api_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function.
-func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error {
-	return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s)
-}
-
-func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error {
-	out.Type = api.ReplicationControllerConditionType(in.Type)
-	out.Status = api.ConditionStatus(in.Status)
-	out.LastTransitionTime = in.LastTransitionTime
-	out.Reason = in.Reason
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition is an autogenerated conversion function.
-func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error {
-	return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s)
-}
-
-func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error {
-	out.Type = v1.ReplicationControllerConditionType(in.Type)
-	out.Status = v1.ConditionStatus(in.Status)
-	out.LastTransitionTime = in.LastTransitionTime
-	out.Reason = in.Reason
-	out.Message = in.Message
-	return nil
-}
-
-// Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function.
-func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *v1.ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList is an autogenerated conversion function. -func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *v1.ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) -} - -func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) -} - -func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus is an autogenerated conversion function. -func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -// Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector is an autogenerated conversion function. -func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) -} - -func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -// Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. -func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) -} - -func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *v1.ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ResourceQuota_To_api_ResourceQuota is an autogenerated conversion function. -func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *v1.ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) -} - -func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. -func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { - return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) -} - -func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *v1.ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList is an autogenerated conversion function. 
-func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *v1.ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) -} - -func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. -func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) -} - -func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -// Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus is an autogenerated conversion function. -func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -// Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. 
-func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -// Convert_v1_ResourceRequirements_To_api_ResourceRequirements is an autogenerated conversion function. -func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) -} - -func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -// Convert_api_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. -func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) -} - -func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -// Convert_v1_SELinuxOptions_To_api_SELinuxOptions is an autogenerated conversion function. -func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) -} - -func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -// Convert_api_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. -func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { - return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) -} - -func autoConvert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *api.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *api.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *api.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *api.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource is an autogenerated conversion function. -func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. 
-func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - // INFO: in.StringData opted out of conversion generation - out.Type = api.SecretType(in.Type) - return nil -} - -func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *v1.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - out.Type = v1.SecretType(in.Type) - return nil -} - -// Convert_api_Secret_To_v1_Secret is an autogenerated conversion function. -func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *v1.Secret, s conversion.Scope) error { - return autoConvert_api_Secret_To_v1_Secret(in, out, s) -} - -func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *v1.SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretEnvSource_To_api_SecretEnvSource is an autogenerated conversion function. -func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *v1.SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) -} - -func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. -func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { - return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) -} - -func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretKeySelector_To_api_SecretKeySelector is an autogenerated conversion function. 
-func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) -} - -func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. -func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { - return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) -} - -func autoConvert_v1_SecretList_To_api_SecretList(in *v1.SecretList, out *api.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Secret, len(*in)) - for i := range *in { - if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_SecretList_To_api_SecretList is an autogenerated conversion function. -func Convert_v1_SecretList_To_api_SecretList(in *v1.SecretList, out *api.SecretList, s conversion.Scope) error { - return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) -} - -func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *v1.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Secret, len(*in)) - for i := range *in { - if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_SecretList_To_v1_SecretList is an autogenerated conversion function. -func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *v1.SecretList, s conversion.Scope) error { - return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) -} - -func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *v1.SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretProjection_To_api_SecretProjection is an autogenerated conversion function. 
-func Convert_v1_SecretProjection_To_api_SecretProjection(in *v1.SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) -} - -func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. -func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { - return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) -} - -func autoConvert_v1_SecretReference_To_api_SecretReference(in *v1.SecretReference, out *api.SecretReference, s conversion.Scope) error { - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_v1_SecretReference_To_api_SecretReference is an autogenerated conversion function. -func Convert_v1_SecretReference_To_api_SecretReference(in *v1.SecretReference, out *api.SecretReference, s conversion.Scope) error { - return autoConvert_v1_SecretReference_To_api_SecretReference(in, out, s) -} - -func autoConvert_api_SecretReference_To_v1_SecretReference(in *api.SecretReference, out *v1.SecretReference, s conversion.Scope) error { - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_api_SecretReference_To_v1_SecretReference is an autogenerated conversion function. -func Convert_api_SecretReference_To_v1_SecretReference(in *api.SecretReference, out *v1.SecretReference, s conversion.Scope) error { - return autoConvert_api_SecretReference_To_v1_SecretReference(in, out, s) -} - -func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource is an autogenerated conversion function. -func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) -} - -func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. 
-func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) -} - -func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - return nil -} - -// Convert_v1_SecurityContext_To_api_SecurityContext is an autogenerated conversion function. -func Convert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) -} - -func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - return nil -} - -func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *v1.SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -// Convert_v1_SerializedReference_To_api_SerializedReference is an autogenerated conversion function. -func Convert_v1_SerializedReference_To_api_SerializedReference(in *v1.SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) -} - -func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -// Convert_api_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. 
-func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { - return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) -} - -func autoConvert_v1_Service_To_api_Service(in *v1.Service, out *api.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Service_To_api_Service is an autogenerated conversion function. -func Convert_v1_Service_To_api_Service(in *v1.Service, out *api.Service, s conversion.Scope) error { - return autoConvert_v1_Service_To_api_Service(in, out, s) -} - -func autoConvert_api_Service_To_v1_Service(in *api.Service, out *v1.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Service_To_v1_Service is an autogenerated conversion function. -func Convert_api_Service_To_v1_Service(in *api.Service, out *v1.Service, s conversion.Scope) error { - return autoConvert_api_Service_To_v1_Service(in, out, s) -} - -func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *v1.ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -// Convert_v1_ServiceAccount_To_api_ServiceAccount is an autogenerated conversion function. -func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *v1.ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) -} - -func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -// Convert_api_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. -func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { - return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) -} - -func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *v1.ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ServiceAccountList_To_api_ServiceAccountList is an autogenerated conversion function. 
-func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *v1.ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) -} - -func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. -func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { - return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) -} - -func autoConvert_v1_ServiceList_To_api_ServiceList(in *v1.ServiceList, out *api.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Service, len(*in)) - for i := range *in { - if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_ServiceList_To_api_ServiceList is an autogenerated conversion function. -func Convert_v1_ServiceList_To_api_ServiceList(in *v1.ServiceList, out *api.ServiceList, s conversion.Scope) error { - return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) -} - -func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *v1.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Service, len(*in)) - for i := range *in { - if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_ServiceList_To_v1_ServiceList is an autogenerated conversion function. -func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *v1.ServiceList, s conversion.Scope) error { - return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) -} - -func autoConvert_v1_ServicePort_To_api_ServicePort(in *v1.ServicePort, out *api.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = api.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -// Convert_v1_ServicePort_To_api_ServicePort is an autogenerated conversion function. -func Convert_v1_ServicePort_To_api_ServicePort(in *v1.ServicePort, out *api.ServicePort, s conversion.Scope) error { - return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) -} - -func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *v1.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = v1.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -// Convert_api_ServicePort_To_v1_ServicePort is an autogenerated conversion function. 
-func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *v1.ServicePort, s conversion.Scope) error { - return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) -} - -func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions is an autogenerated conversion function. -func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) -} - -func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. -func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) -} - -func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *v1.ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.Type = api.ServiceType(in.Type) - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) - out.LoadBalancerIP = in.LoadBalancerIP - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalName = in.ExternalName - out.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) - out.HealthCheckNodePort = in.HealthCheckNodePort - out.PublishNotReadyAddresses = in.PublishNotReadyAddresses - out.SessionAffinityConfig = (*api.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) - return nil -} - -// Convert_v1_ServiceSpec_To_api_ServiceSpec is an autogenerated conversion function. -func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *v1.ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - return autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s) -} - -func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - out.Type = v1.ServiceType(in.Type) - out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.ExternalName = in.ExternalName - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) - out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) - out.HealthCheckNodePort = in.HealthCheckNodePort - out.PublishNotReadyAddresses = in.PublishNotReadyAddresses - return nil -} - -// Convert_api_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. 
-func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - return autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s) -} - -func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *v1.ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ServiceStatus_To_api_ServiceStatus is an autogenerated conversion function. -func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *v1.ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) -} - -func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { - if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -// Convert_api_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. -func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { - return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) -} - -func autoConvert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *api.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*api.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) - return nil -} - -// Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig is an autogenerated conversion function. -func Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *api.SessionAffinityConfig, s conversion.Scope) error { - return autoConvert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in, out, s) -} - -func autoConvert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *api.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) - return nil -} - -// Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. -func Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *api.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - return autoConvert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) -} - -func autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*api.ObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource is an autogenerated conversion function. -func Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in, out, s) -} - -func autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. -func Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { - return autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) -} - -func autoConvert_v1_Sysctl_To_api_Sysctl(in *v1.Sysctl, out *api.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_Sysctl_To_api_Sysctl is an autogenerated conversion function. -func Convert_v1_Sysctl_To_api_Sysctl(in *v1.Sysctl, out *api.Sysctl, s conversion.Scope) error { - return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) -} - -func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *v1.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_Sysctl_To_v1_Sysctl is an autogenerated conversion function. 
-func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *v1.Sysctl, s conversion.Scope) error { - return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) -} - -func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - out.Host = in.Host - return nil -} - -// Convert_v1_TCPSocketAction_To_api_TCPSocketAction is an autogenerated conversion function. -func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) -} - -func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - out.Host = in.Host - return nil -} - -// Convert_api_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. -func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { - return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) -} - -func autoConvert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) - return nil -} - -// Convert_v1_Taint_To_api_Taint is an autogenerated conversion function. -func Convert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversion.Scope) error { - return autoConvert_v1_Taint_To_api_Taint(in, out, s) -} - -func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) - out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) - return nil -} - -// Convert_api_Taint_To_v1_Taint is an autogenerated conversion function. -func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversion.Scope) error { - return autoConvert_api_Taint_To_v1_Taint(in, out, s) -} - -func autoConvert_v1_Toleration_To_api_Toleration(in *v1.Toleration, out *api.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -// Convert_v1_Toleration_To_api_Toleration is an autogenerated conversion function. -func Convert_v1_Toleration_To_api_Toleration(in *v1.Toleration, out *api.Toleration, s conversion.Scope) error { - return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) -} - -func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *v1.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = v1.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -// Convert_api_Toleration_To_v1_Toleration is an autogenerated conversion function. 
-func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *v1.Toleration, s conversion.Scope) error { - return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) -} - -func autoConvert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Volume_To_api_Volume is an autogenerated conversion function. -func Convert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error { - return autoConvert_v1_Volume_To_api_Volume(in, out, s) -} - -func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -// Convert_api_Volume_To_v1_Volume is an autogenerated conversion function. -func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error { - return autoConvert_api_Volume_To_v1_Volume(in, out, s) -} - -func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - out.MountPropagation = (*api.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) - return nil -} - -// Convert_v1_VolumeMount_To_api_VolumeMount is an autogenerated conversion function. -func Convert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) -} - -func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) - return nil -} - -// Convert_api_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. -func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { - return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) -} - -func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *v1.VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -// Convert_v1_VolumeProjection_To_api_VolumeProjection is an autogenerated conversion function. 
-func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *v1.VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) -} - -func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -// Convert_api_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. -func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) -} - -func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*api.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_v1_VolumeSource_To_api_VolumeSource is an autogenerated conversion function. 
-func Convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) -} - -func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_api_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. -func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) -} - -func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - out.StoragePolicyName = in.StoragePolicyName - out.StoragePolicyID = in.StoragePolicyID - return nil -} - -// Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - out.StoragePolicyName = in.StoragePolicyName - out.StoragePolicyID = in.StoragePolicyID - return nil -} - -// Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. -func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -// Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) -} - -func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -// Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go deleted file mode 100644 index 4aec88d5..00000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/legacyscheme" -) - -// ValidateEvent makes sure that the event makes sense. -func ValidateEvent(event *api.Event) field.ErrorList { - allErrs := field.ErrorList{} - - // Make sure event.Namespace and the involvedObject.Namespace agree - if len(event.InvolvedObject.Namespace) == 0 { - // event.Namespace must also be empty (or "default", for compatibility with old clients) - if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } else { - // event namespace must match - if event.Namespace != event.InvolvedObject.Namespace { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } - - // For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds - if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { - if namespaced && len(event.InvolvedObject.Namespace) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) - } - if !namespaced && len(event.InvolvedObject.Namespace) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) - } - } - - for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { - allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) - } - return allErrs -} - -// Check whether the kind in groupVersion is scoped at the root of the api hierarchy -func isNamespacedKind(kind, groupVersion string) (bool, error) { - gv, err := schema.ParseGroupVersion(groupVersion) - if err != nil { - return false, err - } - g, err := legacyscheme.Registry.Group(gv.Group) - if err != nil { - return false, err - } - - restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version) - if err != nil { - return false, err - } - scopeName := restMapping.Scope.Name() - if scopeName == meta.RESTScopeNameNamespace { - return true, nil - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go new file mode 100644 index 00000000..ccf03453 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric +// specs when converting the `Metrics` field from autoscaling/v2beta1 +const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" + +// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric +// statuses when converting the `CurrentMetrics` field from autoscaling/v2beta1 +const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" + +// HorizontalPodAutoscalerConditionsAnnotation is the annotation which holds the conditions +// of an HPA when converting the `Conditions` field from autoscaling/v2beta1 +const HorizontalPodAutoscalerConditionsAnnotation = "autoscaling.alpha.kubernetes.io/conditions" + +// DefaultCPUUtilization is the default value for CPU utilization, provided no other +// metrics are present. This is here because it's used by both the v2beta1 defaulting +// logic, and the pseudo-defaulting done in v1 conversion. +const DefaultCPUUtilization = 80 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go new file mode 100644 index 00000000..7c91aac8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go new file mode 100644 index 00000000..6c321a3a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go new file mode 100644 index 00000000..0e9d669a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go @@ -0,0 +1,363 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ Selector string
+}
+
+// CrossVersionObjectReference contains enough information to let you identify the referred resource.
+type CrossVersionObjectReference struct {
+ // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
+ Kind string
+ // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+ Name string
+ // API version of the referent
+ // +optional
+ APIVersion string
+}
+
+// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
+type HorizontalPodAutoscalerSpec struct {
+ // ScaleTargetRef points to the target resource to scale, and is used to identify the pods for which metrics
+ // should be collected, as well as to actually change the replica count.
+ ScaleTargetRef CrossVersionObjectReference
+ // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
+ // It defaults to 1 pod.
+ // +optional
+ MinReplicas *int32
+ // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
+ // It cannot be less than minReplicas.
+ MaxReplicas int32
+ // Metrics contains the specifications used to calculate the
+ // desired replica count (the maximum replica count across all metrics will
+ // be used). The desired replica count is calculated by multiplying the
+ // ratio between the target value and the current value by the current
+ // number of pods. Ergo, metrics used must decrease as the pod count is
+ // increased, and vice-versa. See the individual metric source types for
+ // more information about how each type of metric must respond.
+ // +optional
+ Metrics []MetricSpec
+}
+
+// MetricSourceType indicates the type of metric.
+type MetricSourceType string
+
+var (
+ // ObjectMetricSourceType is a metric describing a kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ ObjectMetricSourceType MetricSourceType = "Object"
+ // PodsMetricSourceType is a metric describing each pod in the current scale
+ // target (for example, transactions-processed-per-second). The values
+ // will be averaged together before being compared to the target value.
+ PodsMetricSourceType MetricSourceType = "Pods"
+ // ResourceMetricSourceType is a resource metric known to Kubernetes, as
+ // specified in requests and limits, describing each pod in the current
+ // scale target (e.g. CPU or memory). Such metrics are built in to
+ // Kubernetes, and have special scaling options on top of those available
+ // to normal per-pod metrics (the "pods" source).
+ ResourceMetricSourceType MetricSourceType = "Resource"
+)
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+type MetricSpec struct {
+ // Type is the type of metric source. It should match one of the fields below.
+ Type MetricSourceType
+
+ // Object refers to a metric describing a single kubernetes object
+ // (for example, hits-per-second on an Ingress object).
+ // +optional
+ Object *ObjectMetricSource
+ // Pods refers to a metric describing each pod in the current scale target
+ // (for example, transactions-processed-per-second). The values will be
+ // averaged together before being compared to the target value.
+ // +optional + Pods *PodsMetricSource + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // TargetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // MetricName is the name of the metric in question + MetricName string + // TargetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // Name is the name of the resource in question. + Name api.ResourceName + // TargetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 + // TargetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // ObservedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 + + // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time + + // CurrentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 + + // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 + + // CurrentMetrics is the last read state of the metrics used by this autoscaler. 
+ CurrentMetrics []MetricStatus + + // Conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + Conditions []HorizontalPodAutoscalerCondition +} + +// ConditionStatus indicates the status of a condition (true, false, or unknown). +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +var ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // Type describes the current condition + Type HorizontalPodAutoscalerConditionType + // Status is the status of the condition (True, False, Unknown) + Status ConditionStatus + // LastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time + // Reason is the reason for the condition's last transition. + // +optional + Reason string + // Message is a human-readable explanation containing details about + // the transition + // +optional + Message string +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // Type is the type of metric source. It will match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. 
+ // +optional + Resource *ResourceMetricStatus +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // CurrentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // MetricName is the name of the metric in question + MetricName string + // CurrentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name api.ResourceName + // CurrentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 + // CurrentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta + // Metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec + + // Status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta + // Metadata is the standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscaler objects. 
+ Items []HorizontalPodAutoscaler +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go new file mode 100644 index 00000000..c0bccf3b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package autoscaling + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + if *in == nil { + *out = nil + } else { + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.Target = in.Target + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + out.Target = in.Target + out.CurrentValue = in.CurrentValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. 
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + if *in == nil { + *out = nil + } else { + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. 
+func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index 6a6827bb..131fdd99 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -16,7 +16,7 @@ limitations under the License. // This file should be consistent with pkg/api/v1/annotation_key_constants.go. -package api +package core const ( // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy @@ -68,6 +68,10 @@ const ( // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by + // the kubelet prior to running + BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" + // annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/api/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/doc.go index 283a83e4..6017bfda 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package api contains the latest (or "internal") version of the // Kubernetes API objects. This is the API objects as represented in memory. // The contract presented to clients is located in the versioned packages, // which are sub-directories. The first one is "v1". Those packages // describe how a particular version is serialized to storage/network. -package api // import "k8s.io/kubernetes/pkg/api" +package core // import "k8s.io/kubernetes/pkg/apis/core" diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/api/field_constants.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go index 5ead0f13..a26f8056 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core // Field path constants that are specific to the internal API // representation. 
@@ -25,8 +25,8 @@ const ( PodStatusField = "status.phase" SecretTypeField = "type" - EventReasonField = "reason" - EventSourceField = "source" + EventReasonField = "action" + EventSourceField = "reportingComponent" EventTypeField = "type" EventInvolvedKindField = "involvedObject.kind" EventInvolvedNamespaceField = "involvedObject.namespace" diff --git a/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go similarity index 58% rename from vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go index f352f580..ca397c8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -28,30 +28,36 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/core" ) // IsHugePageResourceName returns true if the resource name has the huge page // resource prefix. -func IsHugePageResourceName(name api.ResourceName) bool { - return strings.HasPrefix(string(name), api.ResourceHugePagesPrefix) +func IsHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) +} + +// IsQuotaHugePageResourceName returns true if the resource name has the quota +// related huge page resource prefix. +func IsQuotaHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix) } // HugePageResourceName returns a ResourceName with the canonical hugepage // prefix prepended for the specified page size. The page size is converted // to its canonical representation. -func HugePageResourceName(pageSize resource.Quantity) api.ResourceName { - return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceHugePagesPrefix, pageSize.String())) +func HugePageResourceName(pageSize resource.Quantity) core.ResourceName { + return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String())) } // HugePageSizeFromResourceName returns the page size for the specified huge page // resource name. If the specified input is not a valid huge page resource name // an error is returned. -func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, error) { +func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) { if !IsHugePageResourceName(name) { return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name) } - pageSize := strings.TrimPrefix(string(name), api.ResourceHugePagesPrefix) + pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix) return resource.ParseQuantity(pageSize) } @@ -60,14 +66,14 @@ func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, err func NonConvertibleFields(annotations map[string]string) map[string]string { nonConvertibleKeys := map[string]string{} for key, value := range annotations { - if strings.HasPrefix(key, api.NonConvertibleAnnotationPrefix) { + if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { nonConvertibleKeys[key] = value } } return nonConvertibleKeys } -// Semantic can do semantic deep equality checks for api objects. +// Semantic can do semantic deep equality checks for core objects. 
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true var Semantic = conversion.EqualitiesOrDie( func(a, b resource.Quantity) bool { @@ -92,10 +98,10 @@ var Semantic = conversion.EqualitiesOrDie( ) var standardResourceQuotaScopes = sets.NewString( - string(api.ResourceQuotaScopeTerminating), - string(api.ResourceQuotaScopeNotTerminating), - string(api.ResourceQuotaScopeBestEffort), - string(api.ResourceQuotaScopeNotBestEffort), + string(core.ResourceQuotaScopeTerminating), + string(core.ResourceQuotaScopeNotTerminating), + string(core.ResourceQuotaScopeBestEffort), + string(core.ResourceQuotaScopeNotBestEffort), ) // IsStandardResourceQuotaScope returns true if the scope is a standard value @@ -104,24 +110,24 @@ func IsStandardResourceQuotaScope(str string) bool { } var podObjectCountQuotaResources = sets.NewString( - string(api.ResourcePods), + string(core.ResourcePods), ) var podComputeQuotaResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), ) // IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource string) bool { +func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { switch scope { - case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating, api.ResourceQuotaScopeNotBestEffort: + case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort: return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case api.ResourceQuotaScopeBestEffort: + case core.ResourceQuotaScopeBestEffort: return podObjectCountQuotaResources.Has(resource) default: return true @@ -129,62 +135,45 @@ func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource } var standardContainerResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), ) // IsStandardContainerResourceName returns true if the container can make a resource request // for the specified resource func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(api.ResourceName(str)) + return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) } // IsExtendedResourceName returns true if the resource name is not in the -// default namespace, or it has the opaque integer resource prefix. -func IsExtendedResourceName(name api.ResourceName) bool { - // TODO: Remove OIR part following deprecation. - return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name) +// default namespace. +func IsExtendedResourceName(name core.ResourceName) bool { + return !IsDefaultNamespaceResource(name) } // IsDefaultNamespaceResource returns true if the resource name is in the // *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are // implicitly in the kubernetes.io/ namespace. 
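// For example (illustrative): "cpu", "memory", and any name containing
// "kubernetes.io/" are default-namespace resources, whereas a domain-qualified
// name such as "example.com/widgets" (hypothetical) has no such prefix and is
// therefore treated as an extended resource.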
-func IsDefaultNamespaceResource(name api.ResourceName) bool { +func IsDefaultNamespaceResource(name core.ResourceName) bool { return !strings.Contains(string(name), "/") || - strings.Contains(string(name), api.ResourceDefaultNamespacePrefix) + strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) } -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name api.ResourceName) bool { - return strings.HasPrefix(string(name), api.ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. -func OpaqueIntResourceName(name string) api.ResourceName { - if IsOpaqueIntResourceName(api.ResourceName(name)) { - return api.ResourceName(name) - } - return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) -} - -var overcommitBlacklist = sets.NewString(string(api.ResourceNvidiaGPU)) +var overcommitBlacklist = sets.NewString(string(core.ResourceNvidiaGPU)) // IsOvercommitAllowed returns true if the resource is in the default // namespace and not blacklisted. -func IsOvercommitAllowed(name api.ResourceName) bool { +func IsOvercommitAllowed(name core.ResourceName) bool { return IsDefaultNamespaceResource(name) && !IsHugePageResourceName(name) && !overcommitBlacklist.Has(string(name)) } var standardLimitRangeTypes = sets.NewString( - string(api.LimitTypePod), - string(api.LimitTypeContainer), - string(api.LimitTypePersistentVolumeClaim), + string(core.LimitTypePod), + string(core.LimitTypeContainer), + string(core.LimitTypePersistentVolumeClaim), ) // IsStandardLimitRangeType returns true if the type is Pod or Container @@ -193,112 +182,112 @@ func IsStandardLimitRangeType(str string) bool { } var standardQuotaResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceRequestsStorage), - string(api.ResourceRequestsEphemeralStorage), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceLimitsEphemeralStorage), - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceConfigMaps), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsStorage), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceConfigMaps), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), ) // IsStandardQuotaResourceName returns true if the resource is known to // the quota tracking system func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) + return 
standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) } var standardResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceRequestsEphemeralStorage), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceLimitsEphemeralStorage), - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourceConfigMaps), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceStorage), - string(api.ResourceRequestsStorage), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceStorage), + string(core.ResourceRequestsStorage), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), ) // IsStandardResourceName returns true if the resource is known to the system func IsStandardResourceName(str string) bool { - return standardResources.Has(str) || IsHugePageResourceName(api.ResourceName(str)) + return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) } var integerResources = sets.NewString( - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourceConfigMaps), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), ) // IsIntegerResourceName returns true if the resource is measured in integer values func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str)) + return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) } // Extended and HugePages resources -func IsScalarResourceName(name api.ResourceName) bool { +func IsScalarResourceName(name core.ResourceName) bool { return IsExtendedResourceName(name) || IsHugePageResourceName(name) } // this function aims to check if the service's ClusterIP is set or not // the objective is not to perform validation here -func IsServiceIPSet(service *api.Service) bool { - return service.Spec.ClusterIP != api.ClusterIPNone && service.Spec.ClusterIP != "" +func IsServiceIPSet(service *core.Service) bool { + return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP 
!= "" } // this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *api.Service) bool { +func IsServiceIPRequested(service *core.Service) bool { // ExternalName services are CNAME aliases to external ones. Ignore the IP. - if service.Spec.Type == api.ServiceTypeExternalName { + if service.Spec.Type == core.ServiceTypeExternalName { return false } return service.Spec.ClusterIP == "" } var standardFinalizers = sets.NewString( - string(api.FinalizerKubernetes), + string(core.FinalizerKubernetes), metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents, ) // HasAnnotation returns a bool if passed in annotation exists -func HasAnnotation(obj api.ObjectMeta, ann string) bool { +func HasAnnotation(obj core.ObjectMeta, ann string) bool { _, found := obj.Annotations[ann] return found } // SetMetaDataAnnotation sets the annotation and value -func SetMetaDataAnnotation(obj *api.ObjectMeta, ann string, value string) { +func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) { if obj.Annotations == nil { obj.Annotations = make(map[string]string) } @@ -311,7 +300,7 @@ func IsStandardFinalizerName(str string) bool { // AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, // only if they do not already exist -func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAddress) { +func AddToNodeAddresses(addresses *[]core.NodeAddress, addAddresses ...core.NodeAddress) { for _, add := range addAddresses { exists := false for _, existing := range *addresses { @@ -327,11 +316,11 @@ func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAd } // TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *api.LoadBalancerStatus) bool { +func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { return ingressSliceEqual(l.Ingress, r.Ingress) } -func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { +func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { if len(lhs) != len(rhs) { return false } @@ -343,7 +332,7 @@ func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { return true } -func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool { +func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { if lhs.IP != rhs.IP { return false } @@ -354,9 +343,9 @@ func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool { } // TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerStatus { - c := &api.LoadBalancerStatus{} - c.Ingress = make([]api.LoadBalancerIngress, len(lb.Ingress)) +func LoadBalancerStatusDeepCopy(lb *core.LoadBalancerStatus) *core.LoadBalancerStatus { + c := &core.LoadBalancerStatus{} + c.Ingress = make([]core.LoadBalancerIngress, len(lb.Ingress)) for i := range lb.Ingress { c.Ingress[i] = lb.Ingress[i] } @@ -365,42 +354,42 @@ func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerSta // GetAccessModesAsString returns a string representation of an array of access modes. // modes, when present, are always in the same order: RWO,ROX,RWX. 
-func GetAccessModesAsString(modes []api.PersistentVolumeAccessMode) string { +func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { modes = removeDuplicateAccessModes(modes) modesStr := []string{} - if containsAccessMode(modes, api.ReadWriteOnce) { + if containsAccessMode(modes, core.ReadWriteOnce) { modesStr = append(modesStr, "RWO") } - if containsAccessMode(modes, api.ReadOnlyMany) { + if containsAccessMode(modes, core.ReadOnlyMany) { modesStr = append(modesStr, "ROX") } - if containsAccessMode(modes, api.ReadWriteMany) { + if containsAccessMode(modes, core.ReadWriteMany) { modesStr = append(modesStr, "RWX") } return strings.Join(modesStr, ",") } // GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []api.PersistentVolumeAccessMode { +func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { strmodes := strings.Split(modes, ",") - accessModes := []api.PersistentVolumeAccessMode{} + accessModes := []core.PersistentVolumeAccessMode{} for _, s := range strmodes { s = strings.Trim(s, " ") switch { case s == "RWO": - accessModes = append(accessModes, api.ReadWriteOnce) + accessModes = append(accessModes, core.ReadWriteOnce) case s == "ROX": - accessModes = append(accessModes, api.ReadOnlyMany) + accessModes = append(accessModes, core.ReadOnlyMany) case s == "RWX": - accessModes = append(accessModes, api.ReadWriteMany) + accessModes = append(accessModes, core.ReadWriteMany) } } return accessModes } // removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.PersistentVolumeAccessMode { - accessModes := []api.PersistentVolumeAccessMode{} +func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { + accessModes := []core.PersistentVolumeAccessMode{} for _, m := range modes { if !containsAccessMode(accessModes, m) { accessModes = append(accessModes, m) @@ -409,7 +398,7 @@ func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.Pe return accessModes } -func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { +func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { for _, m := range modes { if m == mode { return true @@ -418,9 +407,9 @@ func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.Persist return false } -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements // labels.Selector. 
-func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labels.Selector, error) { +func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) { if len(nsm) == 0 { return labels.Nothing(), nil } @@ -428,17 +417,17 @@ func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labe for _, expr := range nsm { var op selection.Operator switch expr.Operator { - case api.NodeSelectorOpIn: + case core.NodeSelectorOpIn: op = selection.In - case api.NodeSelectorOpNotIn: + case core.NodeSelectorOpNotIn: op = selection.NotIn - case api.NodeSelectorOpExists: + case core.NodeSelectorOpExists: op = selection.Exists - case api.NodeSelectorOpDoesNotExist: + case core.NodeSelectorOpDoesNotExist: op = selection.DoesNotExist - case api.NodeSelectorOpGt: + case core.NodeSelectorOpGt: op = selection.GreaterThan - case api.NodeSelectorOpLt: + case core.NodeSelectorOpLt: op = selection.LessThan default: return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) @@ -453,11 +442,11 @@ func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labe } // GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in api. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Toleration, error) { - var tolerations []api.Toleration - if len(annotations) > 0 && annotations[api.TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[api.TolerationsAnnotationKey]), &tolerations) +// and converts it to the []Toleration type in core. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { + var tolerations []core.Toleration + if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) if err != nil { return tolerations, err } @@ -467,10 +456,10 @@ func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Tole // AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. // Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool { +func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { podTolerations := pod.Spec.Tolerations - var newTolerations []api.Toleration + var newTolerations []core.Toleration updated := false for i := range podTolerations { if toleration.MatchToleration(&podTolerations[i]) { @@ -494,7 +483,7 @@ func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool { } // TolerationToleratesTaint checks if the toleration tolerates the taint. 
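// For example (illustrative): a toleration with Key "dedicated", Operator
// Exists, and Effect NoSchedule tolerates any taint with that key and effect
// regardless of the taint's value, while an empty or Equal operator also
// requires the toleration's Value to equal the taint's Value.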
-func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool { +func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool { if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { return false } @@ -503,17 +492,17 @@ func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool return false } // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == api.TolerationOpEqual) && toleration.Value == taint.Value { + if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value { return true } - if toleration.Operator == api.TolerationOpExists { + if toleration.Operator == core.TolerationOpExists { return true } return false } // TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration) bool { +func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool { tolerated := false for i := range tolerations { if TolerationToleratesTaint(&tolerations[i], taint) { @@ -525,13 +514,13 @@ func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration) } // GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in api. -func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, error) { - var taints []api.Taint - if len(annotations) > 0 && annotations[api.TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[api.TaintsAnnotationKey]), &taints) +// and converts it to the []Taint type in core. +func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { + var taints []core.Taint + if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints) if err != nil { - return []api.Taint{}, err + return []core.Taint{}, err } } return taints, nil @@ -540,12 +529,12 @@ func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, e // SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls // and a slice of unsafe Sysctls. This is only a convenience wrapper around // SysctlsFromPodAnnotation. -func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl, error) { - safe, err := SysctlsFromPodAnnotation(a[api.SysctlsPodAnnotationKey]) +func SysctlsFromPodAnnotations(a map[string]string) ([]core.Sysctl, []core.Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[core.SysctlsPodAnnotationKey]) if err != nil { return nil, nil, err } - unsafe, err := SysctlsFromPodAnnotation(a[api.UnsafeSysctlsPodAnnotationKey]) + unsafe, err := SysctlsFromPodAnnotation(a[core.UnsafeSysctlsPodAnnotationKey]) if err != nil { return nil, nil, err } @@ -554,13 +543,13 @@ func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl, } // SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. 
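// The annotation value is a comma-separated list of name=value pairs; for
// example (illustrative), "kernel.shm_rmid_forced=1,net.core.somaxconn=1024"
// parses into two Sysctl entries, and any entry without exactly one '=' is
// rejected as malformed.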
-func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) { +func SysctlsFromPodAnnotation(annotation string) ([]core.Sysctl, error) { if len(annotation) == 0 { return nil, nil } kvs := strings.Split(annotation, ",") - sysctls := make([]api.Sysctl, len(kvs)) + sysctls := make([]core.Sysctl, len(kvs)) for i, kv := range kvs { cs := strings.Split(kv, "=") if len(cs) != 2 || len(cs[0]) == 0 { @@ -573,7 +562,7 @@ func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) { } // PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. -func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string { +func PodAnnotationsFromSysctls(sysctls []core.Sysctl) string { if len(sysctls) == 0 { return "" } @@ -586,9 +575,9 @@ func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string { } // GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *api.PersistentVolume) string { +func GetPersistentVolumeClass(volume *core.PersistentVolume) string { // Use beta annotation first - if class, found := volume.Annotations[api.BetaStorageClassAnnotation]; found { + if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found { return class } @@ -597,9 +586,9 @@ func GetPersistentVolumeClass(volume *api.PersistentVolume) string { // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". -func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string { +func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string { // Use beta annotation first - if class, found := claim.Annotations[api.BetaStorageClassAnnotation]; found { + if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { return class } @@ -611,9 +600,9 @@ func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string { } // PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool { +func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { // Use beta annotation first - if _, found := claim.Annotations[api.BetaStorageClassAnnotation]; found { + if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { return true } @@ -625,12 +614,12 @@ func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool { } // GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations -// and converts it to the NodeAffinity type in api. +// and converts it to the NodeAffinity type in core. 
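// For example (illustrative): the core.AlphaStorageNodeAffinityAnnotation
// annotation carries a JSON-serialized core.NodeAffinity, which this helper
// unmarshals; StorageNodeAffinityToAlphaAnnotation below performs the reverse
// serialization.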
// TODO: update when storage node affinity graduates to beta -func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.NodeAffinity, error) { - if len(annotations) > 0 && annotations[api.AlphaStorageNodeAffinityAnnotation] != "" { - var affinity api.NodeAffinity - err := json.Unmarshal([]byte(annotations[api.AlphaStorageNodeAffinityAnnotation]), &affinity) +func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*core.NodeAffinity, error) { + if len(annotations) > 0 && annotations[core.AlphaStorageNodeAffinityAnnotation] != "" { + var affinity core.NodeAffinity + err := json.Unmarshal([]byte(annotations[core.AlphaStorageNodeAffinityAnnotation]), &affinity) if err != nil { return nil, err } @@ -641,7 +630,7 @@ func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.N // Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes // TODO: update when storage node affinity graduates to beta -func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *api.NodeAffinity) error { +func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *core.NodeAffinity) error { if affinity == nil { return nil } @@ -650,6 +639,6 @@ func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinit if err != nil { return err } - annotations[api.AlphaStorageNodeAffinityAnnotation] = string(json) + annotations[core.AlphaStorageNodeAffinityAnnotation] = string(json) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/api/install/install.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go index 5fbffe23..cae514ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go @@ -23,9 +23,9 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1" ) func init() { @@ -36,9 +36,9 @@ func init() { func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ - GroupName: api.GroupName, + GroupName: core.GroupName, VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, - AddInternalObjectsToScheme: api.AddToScheme, + AddInternalObjectsToScheme: core.AddToScheme, RootScopedKinds: sets.NewString( "Node", "Namespace", @@ -56,9 +56,6 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r "PodProxyOptions", "NodeProxyOptions", "ServiceProxyOptions", - "ThirdPartyResource", - "ThirdPartyResourceData", - "ThirdPartyResourceList", ), }, announced.VersionToSchemeFunc{ diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/json.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/json.go index 3a6e04c1..937cd056 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/json.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations 
under the License. */ -package api +package core import "encoding/json" diff --git a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/objectreference.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go index b36ecab2..55b27f30 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core import ( "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go new file mode 100644 index 00000000..cf199cee --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pods + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/fieldpath" +) + +// ConvertDownwardAPIFieldLabel converts the specified downward API field label +// and its value in the pod of the specified version to the internal version, +// and returns the converted label and value. This function returns an error if +// the conversion fails. +func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) { + if version != "v1" { + return "", "", fmt.Errorf("unsupported pod version: %s", version) + } + + if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok { + switch path { + case "metadata.annotations", "metadata.labels": + return label, value, nil + default: + return "", "", fmt.Errorf("field label does not support subscript: %s", label) + } + } + + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "spec.schedulerName", + "status.phase", + "status.hostIP", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/register.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/register.go index 8029a957..2784cbe1 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package api +package core import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/resource.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/resource.go index ce1747d8..1910cd92 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core import ( "k8s.io/apimachinery/pkg/api/resource" diff --git a/vendor/k8s.io/kubernetes/pkg/api/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/taint.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/taint.go index 173e4e60..ae1feb74 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/taint.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core import "fmt" diff --git a/vendor/k8s.io/kubernetes/pkg/api/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/toleration.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go index edb73b74..1dfbc9f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/toleration.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , // if the two tolerations have same combination, regard as they match. diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/api/types.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/types.go index f6d7aa89..032e6d84 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core import ( "k8s.io/apimachinery/pkg/api/resource" @@ -347,10 +347,10 @@ type PersistentVolumeSource struct { // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional Quobyte *QuobyteVolumeSource - // ISCSIVolumeSource represents an ISCSI resource that is attached to a + // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - ISCSI *ISCSIVolumeSource + ISCSI *ISCSIPersistentVolumeSource // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. 
// +optional @@ -391,6 +391,9 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource + // CSI (Container Storage Interface) represents storage that handled by an external CSI driver + // +optional + CSI *CSIPersistentVolumeSource } type PersistentVolumeClaimVolumeSource struct { @@ -404,7 +407,7 @@ type PersistentVolumeClaimVolumeSource struct { const ( // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. - // It's currently still used and will be held for backwards compatibility + // It's deprecated and will be removed in a future release. (#51440) BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" // MountOptionAnnotation defines mount option annotation used in PVs @@ -459,6 +462,11 @@ type PersistentVolumeSpec struct { // simply fail if one is invalid. // +optional MountOptions []string + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes @@ -476,6 +484,16 @@ const ( PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim // +optional @@ -545,6 +563,11 @@ type PersistentVolumeClaimSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 // +optional StorageClassName *string + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode } type PersistentVolumeClaimConditionType string @@ -770,6 +793,54 @@ type ISCSIVolumeSource struct { InitiatorName *string } +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string + // Optional: whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool + // Optional: whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool + // Optional: CHAP secret for iSCSI target and initiator authentication. + // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true + // +optional + SecretRef *SecretReference + // Optional: Custom initiator name per volume. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string +} + // Represents a Fibre Channel volume. // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. @@ -1503,6 +1574,23 @@ type LocalVolumeSource struct { Path string } +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool +} + // ContainerPort represents a network port in a single container type ContainerPort struct { // Optional: If specified, this must be an IANA_SVC_NAME Each named port @@ -1530,7 +1618,9 @@ type VolumeMount struct { // Optional: Defaults to false (read-write). // +optional ReadOnly bool - // Required. Must not contain ':'. + // Required. If the path is not an absolute path (e.g. some/path) it + // will be prepended with the appropriate root prefix for the operating + // system. On Linux this is '/', on Windows this is 'C:\'. MountPath string // Path within the volume from which the container's volume should be mounted. // Defaults to "" (volume's root). @@ -1564,6 +1654,14 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// VolumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string +} + // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Required: This must be a C_IDENTIFIER. @@ -1857,6 +1955,10 @@ type Container struct { Resources ResourceRequirements // +optional VolumeMounts []VolumeMount + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. 
+ // +optional + VolumeDevices []VolumeDevice // +optional LivenessProbe *Probe // +optional @@ -2081,6 +2183,11 @@ const ( // DNSDefault indicates that the pod should use the default (as // determined by kubelet) DNS settings. DNSDefault DNSPolicy = "Default" + + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" ) // A node selector represents the union of the results of one or more label queries @@ -2380,7 +2487,12 @@ type PodSpec struct { // before the system actively tries to terminate the pod; value must be positive integer // +optional ActiveDeadlineSeconds *int64 - // Required: Set DNS policy. + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. // +optional DNSPolicy DNSPolicy // NodeSelector is a selector which must be true for the pod to fit on a node @@ -2444,6 +2556,11 @@ type PodSpec struct { // The higher the value, the higher the priority. // +optional Priority *int32 + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *PodDNSConfig } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -2533,6 +2650,35 @@ const ( PodQOSBestEffort PodQOSClass = "BestEffort" ) +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string + // +optional + Value *string +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. 
type PodStatus struct { @@ -2725,8 +2871,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -2916,7 +3062,8 @@ type ServiceSpec struct { // ExternalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. ExternalName string // ExternalIPs are used by external load balancers, or can be set by @@ -3406,8 +3553,6 @@ const ( ) const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" // Default namespace prefix. ResourceDefaultNamespacePrefix = "kubernetes.io/" // Name prefix for huge page resources (alpha). @@ -3575,6 +3720,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation } @@ -3809,7 +3958,7 @@ type Event struct { // +optional metav1.ObjectMeta - // Required. The object that this event is about. + // Required. The object that this event is about. Mapped to events.Event.regarding // +optional InvolvedObject ObjectReference @@ -3821,7 +3970,7 @@ type Event struct { Reason string // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. + // TODO: decide on maximum length. Mapped to events.Event.note // +optional Message string @@ -3844,8 +3993,49 @@ type Event struct { // Type of this event (Normal, Warning), new types could be added in the future. // +optional Type string + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string + + // ID of the controller instance, e.g. `kubelet-xyzf`. 
+ // +optional + ReportingInstance string } +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // EventList is a list of events. @@ -3964,6 +4154,13 @@ const ( ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" ) +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" +) + // A ResourceQuotaScope defines a filter that must match each object tracked by a quota type ResourceQuotaScope string @@ -4350,5 +4547,5 @@ const ( // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 + DefaultHardPodAffinitySymmetricWeight int32 = 1 ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go similarity index 72% rename from vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index c5b962a7..8f6e09b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -38,79 +38,79 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error { switch a := objA.(type) { case *v1.Pod: switch b := objB.(type) { - case *api.Pod: - return true, Convert_v1_Pod_To_api_Pod(a, b, s) + case *core.Pod: + return true, Convert_v1_Pod_To_core_Pod(a, b, s) } - case *api.Pod: + case *core.Pod: switch b := objB.(type) { case *v1.Pod: - return true, Convert_api_Pod_To_v1_Pod(a, b, s) + return true, Convert_core_Pod_To_v1_Pod(a, b, s) } case *v1.Event: switch b := objB.(type) { - case *api.Event: - return true, Convert_v1_Event_To_api_Event(a, b, s) + case *core.Event: + return true, Convert_v1_Event_To_core_Event(a, b, s) } - case *api.Event: + case *core.Event: switch b := objB.(type) { case *v1.Event: - return true, Convert_api_Event_To_v1_Event(a, b, s) + return true, Convert_core_Event_To_v1_Event(a, b, s) } case *v1.ReplicationController: switch b := objB.(type) { - case *api.ReplicationController: - return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) + case *core.ReplicationController: + return true, Convert_v1_ReplicationController_To_core_ReplicationController(a, b, s) } - case *api.ReplicationController: + case *core.ReplicationController: switch b := objB.(type) { case *v1.ReplicationController: - return true, 
Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) + return true, Convert_core_ReplicationController_To_v1_ReplicationController(a, b, s) } case *v1.Node: switch b := objB.(type) { - case *api.Node: - return true, Convert_v1_Node_To_api_Node(a, b, s) + case *core.Node: + return true, Convert_v1_Node_To_core_Node(a, b, s) } - case *api.Node: + case *core.Node: switch b := objB.(type) { case *v1.Node: - return true, Convert_api_Node_To_v1_Node(a, b, s) + return true, Convert_core_Node_To_v1_Node(a, b, s) } case *v1.Namespace: switch b := objB.(type) { - case *api.Namespace: - return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) + case *core.Namespace: + return true, Convert_v1_Namespace_To_core_Namespace(a, b, s) } - case *api.Namespace: + case *core.Namespace: switch b := objB.(type) { case *v1.Namespace: - return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) + return true, Convert_core_Namespace_To_v1_Namespace(a, b, s) } case *v1.Service: switch b := objB.(type) { - case *api.Service: - return true, Convert_v1_Service_To_api_Service(a, b, s) + case *core.Service: + return true, Convert_v1_Service_To_core_Service(a, b, s) } - case *api.Service: + case *core.Service: switch b := objB.(type) { case *v1.Service: - return true, Convert_api_Service_To_v1_Service(a, b, s) + return true, Convert_core_Service_To_v1_Service(a, b, s) } case *v1.Endpoints: switch b := objB.(type) { - case *api.Endpoints: - return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) + case *core.Endpoints: + return true, Convert_v1_Endpoints_To_core_Endpoints(a, b, s) } - case *api.Endpoints: + case *core.Endpoints: switch b := objB.(type) { case *v1.Endpoints: - return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) + return true, Convert_core_Endpoints_To_v1_Endpoints(a, b, s) } case *metav1.WatchEvent: @@ -132,16 +132,16 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error { func addConversionFuncs(scheme *runtime.Scheme) error { // Add non-generated conversion functions err := scheme.AddConversionFuncs( - Convert_api_Pod_To_v1_Pod, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_Pod_To_api_Pod, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_v1_Secret_To_api_Secret, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_v1_ResourceList_To_api_ResourceList, + Convert_core_Pod_To_v1_Pod, + Convert_core_PodSpec_To_v1_PodSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_core_Pod, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_v1_Secret_To_core_Secret, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_v1_ResourceList_To_core_ResourceList, Convert_v1_ReplicationController_to_extensions_ReplicaSet, Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, @@ -157,20 +157,15 @@ func addConversionFuncs(scheme *runtime.Scheme) error { err = scheme.AddFieldLabelConversionFunc("v1", "Pod", func(label, value string) (string, string, error) { switch label { - case "metadata.annotations", - "metadata.labels", - "metadata.name", + case "metadata.name", "metadata.namespace", - "metadata.uid", "spec.nodeName", "spec.restartPolicy", - 
"spec.serviceAccountName", "spec.schedulerName", "status.phase", - "status.hostIP", "status.podIP": return label, value, nil - // This is for backwards compatibility with old v1 clients which send spec.host + // This is for backwards compatibility with old v1 clients which send spec.host case "spec.host": return "spec.nodeName", value, nil default: @@ -241,7 +236,7 @@ func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *v1.Re metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) } if in.Template != nil { - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil { + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil { return err } } @@ -257,7 +252,7 @@ func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *v for _, cond := range in.Conditions { out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{ Type: extensions.ReplicaSetConditionType(cond.Type), - Status: api.ConditionStatus(cond.Status), + Status: core.ConditionStatus(cond.Status), LastTransitionTime: cond.LastTransitionTime, Reason: cond.Reason, Message: cond.Message, @@ -293,7 +288,7 @@ func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *exten invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) } out.Template = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { return err } return invalidErr @@ -317,13 +312,13 @@ func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *e return nil } -func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { +func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector if in.Template != nil { out.Template = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { return err } } else { @@ -332,15 +327,15 @@ func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *a return nil } -func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { +func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector if in.Template != nil { - out.Template = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { + out.Template = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil { return err } } else { @@ -349,16 +344,16 @@ func 
Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v return nil } -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { +func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { return err } return nil } -func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { +func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil { return err } @@ -367,8 +362,8 @@ func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, o // The following two v1.PodSpec conversions are done here to support v1.ServiceAccount // as an alias for ServiceAccountName. -func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { +func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil { return err } @@ -386,8 +381,8 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conve return nil } -func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { +func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil { return err } @@ -400,7 +395,7 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve // the host namespace fields have to be handled specially for backward compatibility // with v1.0.0 if out.SecurityContext == nil { - out.SecurityContext = new(api.PodSecurityContext) + out.SecurityContext = new(core.PodSecurityContext) } out.SecurityContext.HostNetwork = in.HostNetwork out.SecurityContext.HostPID = in.HostPID @@ -409,8 +404,8 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve return nil } -func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error { - if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { +func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil { return err } @@ -431,8 +426,8 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) err return nil } -func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error { - if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { +func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil { return err } @@ -448,10 +443,10 @@ func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out 
*api.Secret, s conversio return nil } -func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { +func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { if in.Capabilities != nil { out.Capabilities = new(v1.Capabilities) - if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { + if err := Convert_core_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { return err } } else { @@ -460,7 +455,7 @@ func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out.Privileged = in.Privileged if in.SELinuxOptions != nil { out.SELinuxOptions = new(v1.SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { @@ -473,11 +468,11 @@ func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, return nil } -func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { +func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { out.SupplementalGroups = in.SupplementalGroups if in.SELinuxOptions != nil { out.SELinuxOptions = new(v1.SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { @@ -489,11 +484,11 @@ func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurity return nil } -func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { +func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { out.SupplementalGroups = in.SupplementalGroups if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + out.SELinuxOptions = new(core.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { @@ -506,12 +501,12 @@ func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityC } // +k8s:conversion-fn=copy-only -func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.ResourceList, s conversion.Scope) error { +func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error { if *in == nil { return nil } if *out == nil { - *out = make(api.ResourceList, len(*in)) + *out = make(core.ResourceList, len(*in)) } for key, val := range *in { // Moved to defaults @@ -520,7 +515,7 @@ func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.R // const milliScale = -3 // val.RoundUp(milliScale) - (*out)[api.ResourceName(key)] = val + (*out)[core.ResourceName(key)] = val } return nil } diff --git 
a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 02b1832d..c2aeafc3 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -20,6 +20,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/parsers" utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) @@ -39,14 +41,6 @@ func SetDefaults_ResourceList(obj *v1.ResourceList) { } } -func SetDefaults_PodExecOptions(obj *v1.PodExecOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_PodAttachOptions(obj *v1.PodAttachOptions) { - obj.Stdout = true - obj.Stderr = true -} func SetDefaults_ReplicationController(obj *v1.ReplicationController) { var labels map[string]string if obj.Spec.Template != nil { @@ -236,17 +230,30 @@ func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) { if obj.Spec.PersistentVolumeReclaimPolicy == "" { obj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } } func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) { if obj.Status.Phase == "" { obj.Status.Phase = v1.ClaimPending } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } } func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) { if obj.ISCSIInterface == "" { obj.ISCSIInterface = "default" } } +func SetDefaults_ISCSIPersistentVolumeSource(obj *v1.ISCSIPersistentVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) { if obj.CachingMode == nil { obj.CachingMode = new(v1.AzureDataDiskCachingMode) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go similarity index 73% rename from vendor/k8s.io/kubernetes/pkg/api/v1/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go index 0f1e0d49..454e3018 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/api -// +k8s:conversion-gen-external-types=../../../vendor/k8s.io/api/core/v1 +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core +// +k8s:conversion-gen-external-types=k8s.io/api/core/v1 // +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../vendor/k8s.io/api/core/v1 +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/core/v1 // Package v1 is the v1 version of the API. 
-package v1 // import "k8s.io/kubernetes/pkg/api/v1"
+package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1"
diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
similarity index 94%
rename from vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go
rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
index 92cacf8d..4b21aefc 100644
--- a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go
@@ -26,14 +26,13 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/kubernetes/pkg/api/helper"
+	"k8s.io/kubernetes/pkg/apis/core/helper"
 )
 
 // IsExtendedResourceName returns true if the resource name is not in the
-// default namespace, or it has the opaque integer resource prefix.
+// default namespace.
 func IsExtendedResourceName(name v1.ResourceName) bool {
-	// TODO: Remove OIR part following deprecation.
-	return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name)
+	return !IsDefaultNamespaceResource(name)
 }
 
 // IsDefaultNamespaceResource returns true if the resource name is in the
@@ -69,22 +68,6 @@ func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, erro
 	return resource.ParseQuantity(pageSize)
 }
 
-// IsOpaqueIntResourceName returns true if the resource name has the opaque
-// integer resource prefix.
-func IsOpaqueIntResourceName(name v1.ResourceName) bool {
-	return strings.HasPrefix(string(name), v1.ResourceOpaqueIntPrefix)
-}
-
-// OpaqueIntResourceName returns a ResourceName with the canonical opaque
-// integer prefix prepended. If the argument already has the prefix, it is
-// returned unmodified.
-func OpaqueIntResourceName(name string) v1.ResourceName {
-	if IsOpaqueIntResourceName(v1.ResourceName(name)) {
-		return v1.ResourceName(name)
-	}
-	return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceOpaqueIntPrefix, name))
-}
-
 var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU))
 
 // IsOvercommitAllowed returns true if the resource is in the default
diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go
similarity index 100%
rename from vendor/k8s.io/kubernetes/pkg/api/v1/register.go
rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
new file mode 100644
index 00000000..29102979
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go
@@ -0,0 +1,5620 @@
+// +build !ignore_autogenerated
+
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This file was autogenerated by conversion-gen. Do not edit it manually!
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + core "k8s.io/kubernetes/pkg/apis/core" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource, + Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_core_Affinity, + Convert_core_Affinity_To_v1_Affinity, + Convert_v1_AttachedVolume_To_core_AttachedVolume, + Convert_core_AttachedVolume_To_v1_AttachedVolume, + Convert_v1_AvoidPods_To_core_AvoidPods, + Convert_core_AvoidPods_To_v1_AvoidPods, + Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource, + Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, + Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource, + Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource, + Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource, + Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_core_Binding, + Convert_core_Binding_To_v1_Binding, + Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource, + Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource, + Convert_v1_Capabilities_To_core_Capabilities, + Convert_core_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource, + Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource, + Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource, + Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource, + Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ClientIPConfig_To_core_ClientIPConfig, + Convert_core_ClientIPConfig_To_v1_ClientIPConfig, + Convert_v1_ComponentCondition_To_core_ComponentCondition, + Convert_core_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_core_ComponentStatus, + Convert_core_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_core_ComponentStatusList, + Convert_core_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_core_ConfigMap, + Convert_core_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource, + Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, + Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector, + Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + Convert_v1_ConfigMapList_To_core_ConfigMapList, + Convert_core_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection, + Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection, + Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource, + Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_core_Container, + Convert_core_Container_To_v1_Container, + Convert_v1_ContainerImage_To_core_ContainerImage, + 
Convert_core_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_core_ContainerPort, + Convert_core_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_core_ContainerState, + Convert_core_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning, + Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated, + Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting, + Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_core_ContainerStatus, + Convert_core_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint, + Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_core_DeleteOptions, + Convert_core_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection, + Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection, + Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile, + Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource, + Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource, + Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_core_EndpointAddress, + Convert_core_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_core_EndpointPort, + Convert_core_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_core_EndpointSubset, + Convert_core_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_core_Endpoints, + Convert_core_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_core_EndpointsList, + Convert_core_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_core_EnvFromSource, + Convert_core_EnvFromSource_To_v1_EnvFromSource, + Convert_v1_EnvVar_To_core_EnvVar, + Convert_core_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_core_EnvVarSource, + Convert_core_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_core_Event, + Convert_core_Event_To_v1_Event, + Convert_v1_EventList_To_core_EventList, + Convert_core_EventList_To_v1_EventList, + Convert_v1_EventSeries_To_core_EventSeries, + Convert_core_EventSeries_To_v1_EventSeries, + Convert_v1_EventSource_To_core_EventSource, + Convert_core_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_core_ExecAction, + Convert_core_ExecAction_To_v1_ExecAction, + Convert_v1_FCVolumeSource_To_core_FCVolumeSource, + Convert_core_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource, + Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource, + Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource, + Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource, + Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource, + Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_core_HTTPGetAction, + Convert_core_HTTPGetAction_To_v1_HTTPGetAction, + 
Convert_v1_HTTPHeader_To_core_HTTPHeader, + Convert_core_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_core_Handler, + Convert_core_Handler_To_v1_Handler, + Convert_v1_HostAlias_To_core_HostAlias, + Convert_core_HostAlias_To_v1_HostAlias, + Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource, + Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource, + Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource, + Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource, + Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_core_KeyToPath, + Convert_core_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_core_Lifecycle, + Convert_core_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_core_LimitRange, + Convert_core_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_core_LimitRangeItem, + Convert_core_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_core_LimitRangeList, + Convert_core_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec, + Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_core_List, + Convert_core_List_To_v1_List, + Convert_v1_ListOptions_To_core_ListOptions, + Convert_core_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress, + Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress, + Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus, + Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_core_LocalObjectReference, + Convert_core_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource, + Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource, + Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource, + Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_core_Namespace, + Convert_core_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_core_NamespaceList, + Convert_core_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_core_NamespaceSpec, + Convert_core_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_core_NamespaceStatus, + Convert_core_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_core_Node, + Convert_core_Node_To_v1_Node, + Convert_v1_NodeAddress_To_core_NodeAddress, + Convert_core_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_core_NodeAffinity, + Convert_core_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_core_NodeCondition, + Convert_core_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeConfigSource_To_core_NodeConfigSource, + Convert_core_NodeConfigSource_To_v1_NodeConfigSource, + Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints, + Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_core_NodeList, + Convert_core_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions, + Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_core_NodeResources, + Convert_core_NodeResources_To_v1_NodeResources, + Convert_v1_NodeSelector_To_core_NodeSelector, + Convert_core_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement, + Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm, + 
Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_core_NodeSpec, + Convert_core_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_core_NodeStatus, + Convert_core_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo, + Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector, + Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector, + Convert_v1_ObjectMeta_To_core_ObjectMeta, + Convert_core_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_core_ObjectReference, + Convert_core_ObjectReference_To_v1_ObjectReference, + Convert_v1_PersistentVolume_To_core_PersistentVolume, + Convert_core_PersistentVolume_To_v1_PersistentVolume, + Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim, + Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition, + Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition, + Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList, + Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec, + Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus, + Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource, + Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList, + Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList, + Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource, + Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec, + Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus, + Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource, + Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, + Convert_v1_Pod_To_core_Pod, + Convert_core_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_core_PodAffinity, + Convert_core_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm, + Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity, + Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_core_PodAttachOptions, + Convert_core_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_core_PodCondition, + Convert_core_PodCondition_To_v1_PodCondition, + Convert_v1_PodDNSConfig_To_core_PodDNSConfig, + Convert_core_PodDNSConfig_To_v1_PodDNSConfig, + Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption, + Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption, + Convert_v1_PodExecOptions_To_core_PodExecOptions, + Convert_core_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_core_PodList, + Convert_core_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_core_PodLogOptions, + Convert_core_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions, + 
Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions, + Convert_v1_PodProxyOptions_To_core_PodProxyOptions, + Convert_core_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_core_PodSecurityContext, + Convert_core_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSignature_To_core_PodSignature, + Convert_core_PodSignature_To_v1_PodSignature, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_core_PodSpec_To_v1_PodSpec, + Convert_v1_PodStatus_To_core_PodStatus, + Convert_core_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_core_PodStatusResult, + Convert_core_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_core_PodTemplate, + Convert_core_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_core_PodTemplateList, + Convert_core_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec, + Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource, + Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource, + Convert_v1_Preconditions_To_core_Preconditions, + Convert_core_Preconditions_To_v1_Preconditions, + Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry, + Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, + Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm, + Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_core_Probe, + Convert_core_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource, + Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, + Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource, + Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, + Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource, + Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource, + Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource, + Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_core_RangeAllocation, + Convert_core_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_core_ReplicationController, + Convert_core_ReplicationController_To_v1_ReplicationController, + Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition, + Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, + Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList, + Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus, + Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector, + Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_core_ResourceQuota, + Convert_core_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList, + Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec, + Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus, + Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_core_ResourceRequirements, + 
Convert_core_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_core_SELinuxOptions, + Convert_core_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource, + Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource, + Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource, + Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, + Convert_v1_Secret_To_core_Secret, + Convert_core_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_core_SecretEnvSource, + Convert_core_SecretEnvSource_To_v1_SecretEnvSource, + Convert_v1_SecretKeySelector_To_core_SecretKeySelector, + Convert_core_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_core_SecretList, + Convert_core_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_core_SecretProjection, + Convert_core_SecretProjection_To_v1_SecretProjection, + Convert_v1_SecretReference_To_core_SecretReference, + Convert_core_SecretReference_To_v1_SecretReference, + Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource, + Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_core_SecurityContext, + Convert_core_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_core_SerializedReference, + Convert_core_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_core_Service, + Convert_core_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_core_ServiceAccount, + Convert_core_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_core_ServiceAccountList, + Convert_core_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_core_ServiceList, + Convert_core_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_core_ServicePort, + Convert_core_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions, + Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_core_ServiceStatus, + Convert_core_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig, + Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig, + Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource, + Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, + Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource, + Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, + Convert_v1_Sysctl_To_core_Sysctl, + Convert_core_Sysctl_To_v1_Sysctl, + Convert_v1_TCPSocketAction_To_core_TCPSocketAction, + Convert_core_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_core_Taint, + Convert_core_Taint_To_v1_Taint, + Convert_v1_Toleration_To_core_Toleration, + Convert_core_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_core_Volume, + Convert_core_Volume_To_v1_Volume, + Convert_v1_VolumeDevice_To_core_VolumeDevice, + Convert_core_VolumeDevice_To_v1_VolumeDevice, + Convert_v1_VolumeMount_To_core_VolumeMount, + Convert_core_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_core_VolumeProjection, + Convert_core_VolumeProjection_To_v1_VolumeProjection, + Convert_v1_VolumeSource_To_core_VolumeSource, + Convert_core_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource, + 
Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm, + Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ) +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*core.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*core.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*core.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_v1_Affinity_To_core_Affinity is an autogenerated conversion function. +func Convert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_core_Affinity(in, out, s) +} + +func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_core_Affinity_To_v1_Affinity is an autogenerated conversion function. +func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error { + return autoConvert_core_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error { + out.Name = core.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_AttachedVolume_To_core_AttachedVolume is an autogenerated conversion function. 
+func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error {
+	return autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in, out, s)
+}
+
+func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error {
+	out.Name = v1.UniqueVolumeName(in.Name)
+	out.DevicePath = in.DevicePath
+	return nil
+}
+
+// Convert_core_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function.
+func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error {
+	return autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in, out, s)
+}
+
+func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
+	out.PreferAvoidPods = *(*[]core.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
+	return nil
+}
+
+// Convert_v1_AvoidPods_To_core_AvoidPods is an autogenerated conversion function.
+func Convert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error {
+	return autoConvert_v1_AvoidPods_To_core_AvoidPods(in, out, s)
+}
+
+func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error {
+	out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods))
+	return nil
+}
+
+// Convert_core_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function.
+func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error {
+	return autoConvert_core_AvoidPods_To_v1_AvoidPods(in, out, s)
+}
+
+func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
+	out.DiskName = in.DiskName
+	out.DataDiskURI = in.DataDiskURI
+	out.CachingMode = (*core.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
+	out.FSType = (*string)(unsafe.Pointer(in.FSType))
+	out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
+	out.Kind = (*core.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
+	return nil
+}
+
+// Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource is an autogenerated conversion function.
+func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error {
+	return autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in, out, s)
+}
+
+func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error {
+	out.DiskName = in.DiskName
+	out.DataDiskURI = in.DataDiskURI
+	out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode))
+	out.FSType = (*string)(unsafe.Pointer(in.FSType))
+	out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly))
+	out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind))
+	return nil
+}
+
+// Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function.
+func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) + return nil +} + +// Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in, out, s) +} + +func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) + return nil +} + +// Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function. +func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Binding_To_core_Binding is an autogenerated conversion function. 
+func Convert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_core_Binding(in, out, s) +} + +func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_core_Binding_To_v1_Binding is an autogenerated conversion function. +func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { + return autoConvert_core_Binding_To_v1_Binding(in, out, s) +} + +func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.VolumeHandle = in.VolumeHandle + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.VolumeHandle = in.VolumeHandle + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { + out.Add = *(*[]core.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]core.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_v1_Capabilities_To_core_Capabilities is an autogenerated conversion function. +func Convert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_core_Capabilities(in, out, s) +} + +func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { + out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_core_Capabilities_To_v1_Capabilities is an autogenerated conversion function. 
+func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { + return autoConvert_core_Capabilities_To_v1_Capabilities(in, out, s) +} + +func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource is an autogenerated conversion function. +func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in, out, s) +} + +func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. 
+func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +} + +func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource is an autogenerated conversion function. +func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in, out, s) +} + +func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. +func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +} + +func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ClientIPConfig_To_core_ClientIPConfig is an autogenerated conversion function. +func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { + return autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in, out, s) +} + +func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_core_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. +func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { + return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) +} + +func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { + out.Type = core.ComponentConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_v1_ComponentCondition_To_core_ComponentCondition is an autogenerated conversion function. +func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in, out, s) +} + +func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { + out.Type = v1.ComponentConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_core_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. 
+func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { + return autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in, out, s) +} + +func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]core.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ComponentStatus_To_core_ComponentStatus is an autogenerated conversion function. +func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in, out, s) +} + +func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. +func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { + return autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in, out, s) +} + +func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ComponentStatusList_To_core_ComponentStatusList is an autogenerated conversion function. +func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in, out, s) +} + +func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. +func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { + return autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) +} + +func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_ConfigMap_To_core_ConfigMap is an autogenerated conversion function. +func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_core_ConfigMap(in, out, s) +} + +func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. 
+func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { + return autoConvert_core_ConfigMap_To_v1_ConfigMap(in, out, s) +} + +func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource is an autogenerated conversion function. +func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. +func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ConfigMapList_To_core_ConfigMapList is an autogenerated conversion function. 
+func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in, out, s) +} + +func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. +func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + return autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection is an autogenerated conversion function. +func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in, out, s) +} + +func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. +func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. +func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]core.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*core.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*core.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = core.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = core.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.SecurityContext) + if err := Convert_v1_SecurityContext_To_core_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_v1_Container_To_core_Container is an autogenerated conversion function. 
+func Convert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_core_Container(in, out, s) +} + +func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + if err := Convert_core_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_core_Container_To_v1_Container is an autogenerated conversion function. +func Convert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + return autoConvert_core_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_v1_ContainerImage_To_core_ContainerImage is an autogenerated conversion function. +func Convert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_core_ContainerImage(in, out, s) +} + +func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_core_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. +func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + return autoConvert_core_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = core.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_v1_ContainerPort_To_core_ContainerPort is an autogenerated conversion function. 
+func Convert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_core_ContainerPort(in, out, s) +} + +func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = v1.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_core_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. +func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*core.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_v1_ContainerState_To_core_ContainerState is an autogenerated conversion function. +func Convert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_core_ContainerState(in, out, s) +} + +func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_core_ContainerState_To_v1_ContainerState is an autogenerated conversion function. +func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + return autoConvert_core_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning is an autogenerated conversion function. +func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in, out, s) +} + +func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. 
+func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated is an autogenerated conversion function. +func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in, out, s) +} + +func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. +func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting is an autogenerated conversion function. +func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in, out, s) +} + +func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. 
+func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStatus_To_core_ContainerStatus is an autogenerated conversion function. +func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in, out, s) +} + +func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. +func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint is an autogenerated conversion function. +func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in, out, s) +} + +func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. 
+func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DeleteOptions_To_core_DeleteOptions(in *v1.DeleteOptions, out *core.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*core.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*core.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_v1_DeleteOptions_To_core_DeleteOptions is an autogenerated conversion function. +func Convert_v1_DeleteOptions_To_core_DeleteOptions(in *v1.DeleteOptions, out *core.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_core_DeleteOptions(in, out, s) +} + +func autoConvert_core_DeleteOptions_To_v1_DeleteOptions(in *core.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*v1.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*v1.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_core_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. +func Convert_core_DeleteOptions_To_v1_DeleteOptions(in *core.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { + return autoConvert_core_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection is an autogenerated conversion function. +func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in, out, s) +} + +func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. +func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile is an autogenerated conversion function. 
+func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = core.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource is an autogenerated conversion function. +func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = v1.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. 
+func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_v1_EndpointAddress_To_core_EndpointAddress is an autogenerated conversion function. +func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in, out, s) +} + +func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_core_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. +func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + return autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = core.Protocol(in.Protocol) + return nil +} + +// Convert_v1_EndpointPort_To_core_EndpointPort is an autogenerated conversion function. +func Convert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_core_EndpointPort(in, out, s) +} + +func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = v1.Protocol(in.Protocol) + return nil +} + +// Convert_core_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. +func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + return autoConvert_core_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]core.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_EndpointSubset_To_core_EndpointSubset is an autogenerated conversion function. 
+func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in, out, s) +} + +func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. +func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + return autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]core.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_v1_Endpoints_To_core_Endpoints is an autogenerated conversion function. +func Convert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_core_Endpoints(in, out, s) +} + +func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_core_Endpoints_To_v1_Endpoints is an autogenerated conversion function. +func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + return autoConvert_core_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EndpointsList_To_core_EndpointsList is an autogenerated conversion function. +func Convert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_core_EndpointsList(in, out, s) +} + +func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. +func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + return autoConvert_core_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*core.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*core.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_EnvFromSource_To_core_EnvFromSource is an autogenerated conversion function. 
+func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in, out, s) +} + +func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. +func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + return autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*core.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_v1_EnvVar_To_core_EnvVar is an autogenerated conversion function. +func Convert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_core_EnvVar(in, out, s) +} + +func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_core_EnvVar_To_v1_EnvVar is an autogenerated conversion function. +func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + return autoConvert_core_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*core.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*core.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_v1_EnvVarSource_To_core_EnvVarSource is an autogenerated conversion function. +func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in, out, s) +} + +func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_core_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. 
+func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + return autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_core_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_v1_Event_To_core_Event is an autogenerated conversion function. +func Convert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_core_Event(in, out, s) +} + +func autoConvert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*v1.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_core_Event_To_v1_Event is an autogenerated conversion function. +func Convert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + return autoConvert_core_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_core_EventList is an autogenerated conversion function. +func Convert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_core_EventList(in, out, s) +} + +func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EventList_To_v1_EventList is an autogenerated conversion function. 
+func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + return autoConvert_core_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = core.EventSeriesState(in.State) + return nil +} + +// Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function. +func Convert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s) +} + +func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = v1.EventSeriesState(in.State) + return nil +} + +// Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function. +func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s) +} + +func autoConvert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_v1_EventSource_To_core_EventSource is an autogenerated conversion function. +func Convert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_core_EventSource(in, out, s) +} + +func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_core_EventSource_To_v1_EventSource is an autogenerated conversion function. +func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + return autoConvert_core_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_ExecAction_To_core_ExecAction is an autogenerated conversion function. +func Convert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_core_ExecAction(in, out, s) +} + +func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_ExecAction_To_v1_ExecAction is an autogenerated conversion function. 
+func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + return autoConvert_core_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_v1_FCVolumeSource_To_core_FCVolumeSource is an autogenerated conversion function. +func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in, out, s) +} + +func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_core_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. +func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource is an autogenerated conversion function. +func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in, out, s) +} + +func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. +func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource is an autogenerated conversion function. 
+func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in, out, s) +} + +func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. +func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. 
+func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = core.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]core.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_v1_HTTPGetAction_To_core_HTTPGetAction is an autogenerated conversion function. +func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in, out, s) +} + +func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = v1.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_core_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. +func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + return autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_HTTPHeader_To_core_HTTPHeader is an autogenerated conversion function. +func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in, out, s) +} + +func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. 
+func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_v1_Handler_To_core_Handler is an autogenerated conversion function. +func Convert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_core_Handler(in, out, s) +} + +func autoConvert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_core_Handler_To_v1_Handler is an autogenerated conversion function. +func Convert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + return autoConvert_core_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_v1_HostAlias_To_core_HostAlias is an autogenerated conversion function. +func Convert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + return autoConvert_v1_HostAlias_To_core_HostAlias(in, out, s) +} + +func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_core_HostAlias_To_v1_HostAlias is an autogenerated conversion function. +func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource is an autogenerated conversion function. +func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in, out, s) +} + +func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. 
+func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_KeyToPath_To_core_KeyToPath is an autogenerated conversion function. +func Convert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_core_KeyToPath(in, out, s) +} + +func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. +func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + return autoConvert_core_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + out.PostStart = (*core.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*core.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_v1_Lifecycle_To_core_Lifecycle is an autogenerated conversion function. +func Convert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_core_Lifecycle(in, out, s) +} + +func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_core_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. 
+func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1_LimitRange_To_core_LimitRange is an autogenerated conversion function. +func Convert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_core_LimitRange(in, out, s) +} + +func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_LimitRange_To_v1_LimitRange is an autogenerated conversion function. +func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + return autoConvert_core_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + out.Type = core.LimitType(in.Type) + out.Max = *(*core.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*core.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*core.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*core.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*core.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_v1_LimitRangeItem_To_core_LimitRangeItem is an autogenerated conversion function. +func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in, out, s) +} + +func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + out.Type = v1.LimitType(in.Type) + out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_core_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. +func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + return autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_LimitRangeList_To_core_LimitRangeList is an autogenerated conversion function. 
+func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in, out, s) +} + +func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. +func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + return autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]core.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec is an autogenerated conversion function. +func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in, out, s) +} + +func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. +func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_core_List is an autogenerated conversion function. +func Convert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + return autoConvert_v1_List_To_core_List(in, out, s) +} + +func autoConvert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_List_To_v1_List is an autogenerated conversion function. 
+func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + return autoConvert_core_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_ListOptions_To_core_ListOptions(in *v1.ListOptions, out *core.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ListOptions_To_core_ListOptions is an autogenerated conversion function. +func Convert_v1_ListOptions_To_core_ListOptions(in *v1.ListOptions, out *core.ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_core_ListOptions(in, out, s) +} + +func autoConvert_core_ListOptions_To_v1_ListOptions(in *core.ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_core_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_core_ListOptions_To_v1_ListOptions(in *core.ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_core_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress is an autogenerated conversion function. +func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in, out, s) +} + +func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. +func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]core.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function. 
+func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) +} + +func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. +func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1_LocalObjectReference_To_core_LocalObjectReference is an autogenerated conversion function. +func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in, out, s) +} + +func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_core_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. +func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + return autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource is an autogenerated conversion function. +func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in, out, s) +} + +func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. +func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource is an autogenerated conversion function. 
+func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in, out, s) +} + +func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. +func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func autoConvert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_core_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_core_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Namespace_To_core_Namespace is an autogenerated conversion function. +func Convert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_core_Namespace(in, out, s) +} + +func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Namespace_To_v1_Namespace is an autogenerated conversion function. +func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + return autoConvert_core_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NamespaceList_To_core_NamespaceList is an autogenerated conversion function. +func Convert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_core_NamespaceList(in, out, s) +} + +func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. +func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + return autoConvert_core_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]core.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_v1_NamespaceSpec_To_core_NamespaceSpec is an autogenerated conversion function. 
+func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in, out, s) +} + +func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_core_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. +func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + return autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + out.Phase = core.NamespacePhase(in.Phase) + return nil +} + +// Convert_v1_NamespaceStatus_To_core_NamespaceStatus is an autogenerated conversion function. +func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in, out, s) +} + +func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + out.Phase = v1.NamespacePhase(in.Phase) + return nil +} + +// Convert_core_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. +func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + return autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_core_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_core_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Node_To_core_Node is an autogenerated conversion function. +func Convert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_core_Node(in, out, s) +} + +func autoConvert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Node_To_v1_Node is an autogenerated conversion function. +func Convert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + return autoConvert_core_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + out.Type = core.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_v1_NodeAddress_To_core_NodeAddress is an autogenerated conversion function. 
+func Convert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_core_NodeAddress(in, out, s) +} + +func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + out.Type = v1.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_core_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. +func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + return autoConvert_core_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*core.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_NodeAffinity_To_core_NodeAffinity is an autogenerated conversion function. +func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in, out, s) +} + +func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. +func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + return autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + out.Type = core.NodeConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_NodeCondition_To_core_NodeCondition is an autogenerated conversion function. +func Convert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_core_NodeCondition(in, out, s) +} + +func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + out.Type = v1.NodeConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. 
+func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + return autoConvert_core_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + out.ConfigMapRef = (*core.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_v1_NodeConfigSource_To_core_NodeConfigSource is an autogenerated conversion function. +func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + return autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in, out, s) +} + +func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_core_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. +func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NodeList_To_core_NodeList is an autogenerated conversion function. +func Convert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_core_NodeList(in, out, s) +} + +func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NodeList_To_v1_NodeList is an autogenerated conversion function. 
+func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + return autoConvert_core_NodeList_To_v1_NodeList(in, out, s) +} + +func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions is an autogenerated conversion function. +func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in, out, s) +} + +func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. +func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_v1_NodeResources_To_core_NodeResources is an autogenerated conversion function. +func Convert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_core_NodeResources(in, out, s) +} + +func autoConvert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_core_NodeResources_To_v1_NodeResources is an autogenerated conversion function. +func Convert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + return autoConvert_core_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_v1_NodeSelector_To_core_NodeSelector is an autogenerated conversion function. +func Convert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_core_NodeSelector(in, out, s) +} + +func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_core_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. 
+func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + return autoConvert_core_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm is an autogenerated conversion function. +func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in, out, s) +} + +func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. +func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]core.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*core.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + return nil +} + +// Convert_v1_NodeSpec_To_core_NodeSpec is an autogenerated conversion function. 
+func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s) +} + +func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + return nil +} + +// Convert_core_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. +func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + return autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*core.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = core.NodePhase(in.Phase) + out.Conditions = *(*[]core.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]core.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_v1_NodeStatus_To_core_NodeStatus is an autogenerated conversion function. +func Convert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_core_NodeStatus(in, out, s) +} + +func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = v1.NodePhase(in.Phase) + out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_core_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. 
+func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo is an autogenerated conversion function. +func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in, out, s) +} + +func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. +func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector is an autogenerated conversion function. +func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in, out, s) +} + +func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. 
+func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectMeta_To_core_ObjectMeta(in *v1.ObjectMeta, out *core.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_v1_ObjectMeta_To_core_ObjectMeta is an autogenerated conversion function. +func Convert_v1_ObjectMeta_To_core_ObjectMeta(in *v1.ObjectMeta, out *core.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_core_ObjectMeta(in, out, s) +} + +func autoConvert_core_ObjectMeta_To_v1_ObjectMeta(in *core.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_core_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. +func Convert_core_ObjectMeta_To_v1_ObjectMeta(in *core.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { + return autoConvert_core_ObjectMeta_To_v1_ObjectMeta(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectReference_To_core_ObjectReference is an autogenerated conversion function. 
+func Convert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_core_ObjectReference(in, out, s) +} + +func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. +func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + return autoConvert_core_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolume_To_core_PersistentVolume is an autogenerated conversion function. +func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in, out, s) +} + +func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. +func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + return autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = core.PersistentVolumeClaimConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = v1.PersistentVolumeClaimConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_core_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList is an autogenerated conversion function. +func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in, out, s) +} + +func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_core_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. +func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = 
(*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*core.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*core.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*core.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} + +// Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*v1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} + +// Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*core.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = core.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Pod_To_core_Pod is an autogenerated conversion function. 
+func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + return autoConvert_v1_Pod_To_core_Pod(in, out, s) +} + +func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAffinity_To_core_PodAffinity is an autogenerated conversion function. +func Convert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_core_PodAffinity(in, out, s) +} + +func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. +func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + return autoConvert_core_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm is an autogenerated conversion function. +func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in, out, s) +} + +func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. 
+func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity is an autogenerated conversion function. +func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in, out, s) +} + +func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. +func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_v1_PodAttachOptions_To_core_PodAttachOptions is an autogenerated conversion function. +func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in, out, s) +} + +func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_core_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. +func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + out.Type = core.PodConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PodCondition_To_core_PodCondition is an autogenerated conversion function. 
+func Convert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_core_PodCondition(in, out, s) +} + +func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + out.Type = v1.PodConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PodCondition_To_v1_PodCondition is an autogenerated conversion function. +func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + return autoConvert_core_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]core.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_PodDNSConfig_To_core_PodDNSConfig is an autogenerated conversion function. +func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in, out, s) +} + +func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]v1.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_PodDNSConfig_To_v1_PodDNSConfig is an autogenerated conversion function. +func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + return autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in, out, s) +} + +func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption is an autogenerated conversion function. +func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in, out, s) +} + +func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption is an autogenerated conversion function. 
+func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_PodExecOptions_To_core_PodExecOptions is an autogenerated conversion function. +func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in, out, s) +} + +func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. +func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_core_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodList_To_core_PodList is an autogenerated conversion function. +func Convert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_core_PodList(in, out, s) +} + +func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Pod, len(*in)) + for i := range *in { + if err := Convert_core_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodList_To_v1_PodList is an autogenerated conversion function. +func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + return autoConvert_core_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_v1_PodLogOptions_To_core_PodLogOptions is an autogenerated conversion function. 
+func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in, out, s) +} + +func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_core_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. +func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions is an autogenerated conversion function. +func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in, out, s) +} + +func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. +func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_PodProxyOptions_To_core_PodProxyOptions is an autogenerated conversion function. +func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in, out, s) +} + +func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. 
+func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_v1_PodSignature_To_core_PodSignature is an autogenerated conversion function. +func Convert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_core_PodSignature(in, out, s) +} + +func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_core_PodSignature_To_v1_PodSignature is an autogenerated conversion function. 
+func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + return autoConvert_core_PodSignature_To_v1_PodSignature(in, out, s) +} + +func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]core.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_core_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = core.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = core.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_core_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*core.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]core.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + return nil +} + +func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + if err := Convert_core_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := 
&in.Containers, &out.Containers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + if err := Convert_core_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + return nil +} + +func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + out.Phase = core.PodPhase(in.Phase) + out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = core.PodQOSClass(in.QOSClass) + return nil +} + +// Convert_v1_PodStatus_To_core_PodStatus is an autogenerated conversion function. +func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s) +} + +func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + out.Phase = v1.PodPhase(in.Phase) + out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = v1.PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} + +// Convert_core_PodStatus_To_v1_PodStatus is an autogenerated conversion function. 
+func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + return autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function. +func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s) +} + +func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. +func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s) +} + +func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodTemplate_To_core_PodTemplate is an autogenerated conversion function. +func Convert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_core_PodTemplate(in, out, s) +} + +func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_core_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. +func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + return autoConvert_core_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_core_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodTemplateList_To_core_PodTemplateList is an autogenerated conversion function. 
+func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in, out, s) +} + +func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_core_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. +func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + return autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource is an autogenerated conversion function. +func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in, out, s) +} + +func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. +func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_v1_Preconditions_To_core_Preconditions is an autogenerated conversion function. 
+func Convert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_core_Preconditions(in, out, s) +} + +func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_core_Preconditions_To_v1_Preconditions is an autogenerated conversion function. +func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + return autoConvert_core_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_core_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_core_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm is an autogenerated conversion function. +func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. 
+func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_core_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_v1_Probe_To_core_Probe is an autogenerated conversion function. +func Convert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_core_Probe(in, out, s) +} + +func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + if err := Convert_core_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_core_Probe_To_v1_Probe is an autogenerated conversion function. +func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + return autoConvert_core_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]core.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource is an autogenerated conversion function. 
+func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. +func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource is an autogenerated conversion function. 
+func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in, out, s) +} + +func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. +func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_RangeAllocation_To_core_RangeAllocation is an autogenerated conversion function. +func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in, out, s) +} + +func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. +func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + return autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ReplicationController_To_core_ReplicationController is an autogenerated conversion function. 
+func Convert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_core_ReplicationController(in, out, s) +} + +func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. +func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + return autoConvert_core_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = core.ReplicationControllerConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = v1.ReplicationControllerConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_core_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in, out, s) +} + +func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_core_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. +func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]core.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. +func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector is an autogenerated conversion function. +func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in, out, s) +} + +func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. +func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ResourceQuota_To_core_ResourceQuota is an autogenerated conversion function. 
+func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in, out, s) +} + +func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. +func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + return autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList is an autogenerated conversion function. +func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in, out, s) +} + +func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. +func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]core.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. 
+func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*core.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_v1_ResourceRequirements_To_core_ResourceRequirements is an autogenerated conversion function. +func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in, out, s) +} + +func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_core_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. +func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_v1_SELinuxOptions_To_core_SELinuxOptions is an autogenerated conversion function. 
+func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in, out, s) +} + +func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_core_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. +func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + return autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = core.SecretType(in.Type) + return nil +} + +func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = v1.SecretType(in.Type) + return nil +} + +// Convert_core_Secret_To_v1_Secret is an autogenerated conversion function. +func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + return autoConvert_core_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretEnvSource_To_core_SecretEnvSource is an autogenerated conversion function. +func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in, out, s) +} + +func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. 
+func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + return autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretKeySelector_To_core_SecretKeySelector is an autogenerated conversion function. +func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in, out, s) +} + +func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. +func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + return autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_core_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_SecretList_To_core_SecretList is an autogenerated conversion function. +func Convert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_core_SecretList(in, out, s) +} + +func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Secret, len(*in)) + for i := range *in { + if err := Convert_core_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_SecretList_To_v1_SecretList is an autogenerated conversion function. 
+func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + return autoConvert_core_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretProjection_To_core_SecretProjection is an autogenerated conversion function. +func Convert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_core_SecretProjection(in, out, s) +} + +func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. +func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + return autoConvert_core_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1_SecretReference_To_core_SecretReference is an autogenerated conversion function. +func Convert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + return autoConvert_v1_SecretReference_To_core_SecretReference(in, out, s) +} + +func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_core_SecretReference_To_v1_SecretReference is an autogenerated conversion function. +func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + return autoConvert_core_SecretReference_To_v1_SecretReference(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource is an autogenerated conversion function. 
+func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in, out, s) +} + +func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. +func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*core.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + return nil +} + +// Convert_v1_SecurityContext_To_core_SecurityContext is an autogenerated conversion function. +func Convert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_core_SecurityContext(in, out, s) +} + +func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + return nil +} + +func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_SerializedReference_To_core_SerializedReference is an autogenerated conversion function. 
+func Convert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_core_SerializedReference(in, out, s) +} + +func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_core_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. +func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + return autoConvert_core_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_core_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_core_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Service_To_core_Service is an autogenerated conversion function. +func Convert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_core_Service(in, out, s) +} + +func autoConvert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Service_To_v1_Service is an autogenerated conversion function. +func Convert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + return autoConvert_core_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_v1_ServiceAccount_To_core_ServiceAccount is an autogenerated conversion function. +func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in, out, s) +} + +func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_core_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. 
+func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + return autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ServiceAccountList_To_core_ServiceAccountList is an autogenerated conversion function. +func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in, out, s) +} + +func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. +func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_core_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ServiceList_To_core_ServiceList is an autogenerated conversion function. +func Convert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_core_ServiceList(in, out, s) +} + +func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Service, len(*in)) + for i := range *in { + if err := Convert_core_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ServiceList_To_v1_ServiceList is an autogenerated conversion function. +func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + return autoConvert_core_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = core.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_v1_ServicePort_To_core_ServicePort is an autogenerated conversion function. 
+func Convert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_core_ServicePort(in, out, s) +} + +func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = v1.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_core_ServicePort_To_v1_ServicePort is an autogenerated conversion function. +func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + return autoConvert_core_ServicePort_To_v1_ServicePort(in, out, s) +} + +func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions is an autogenerated conversion function. +func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in, out, s) +} + +func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. +func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = core.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.SessionAffinity = core.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + return nil +} + +// Convert_v1_ServiceSpec_To_core_ServiceSpec is an autogenerated conversion function. 
+func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + return autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in, out, s) +} + +func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + out.Type = v1.ServiceType(in.Type) + out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) + out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + return nil +} + +// Convert_core_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. +func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + return autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in, out, s) +} + +func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceStatus_To_core_ServiceStatus is an autogenerated conversion function. +func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in, out, s) +} + +func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_core_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. +func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + return autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*core.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig is an autogenerated conversion function. 
+func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in, out, s) +} + +func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. +func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) +} + +func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource is an autogenerated conversion function. 
+func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_Sysctl_To_core_Sysctl is an autogenerated conversion function. +func Convert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_core_Sysctl(in, out, s) +} + +func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_Sysctl_To_v1_Sysctl is an autogenerated conversion function. +func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + return autoConvert_core_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_v1_TCPSocketAction_To_core_TCPSocketAction is an autogenerated conversion function. +func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in, out, s) +} + +func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_core_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. +func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + return autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_v1_Taint_To_core_Taint is an autogenerated conversion function. 
+func Convert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_core_Taint(in, out, s) +} + +func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_core_Taint_To_v1_Taint is an autogenerated conversion function. +func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + return autoConvert_core_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_v1_Toleration_To_core_Toleration is an autogenerated conversion function. +func Convert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_core_Toleration(in, out, s) +} + +func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_core_Toleration_To_v1_Toleration is an autogenerated conversion function. +func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + return autoConvert_core_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function. +func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_core_Volume(in, out, s) +} + +func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function. +func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_VolumeDevice_To_core_VolumeDevice is an autogenerated conversion function. 
+func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + return autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in, out, s) +} + +func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_core_VolumeDevice_To_v1_VolumeDevice is an autogenerated conversion function. +func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + return autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*core.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_v1_VolumeMount_To_core_VolumeMount is an autogenerated conversion function. +func Convert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_core_VolumeMount(in, out, s) +} + +func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_core_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. +func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_v1_VolumeProjection_To_core_VolumeProjection is an autogenerated conversion function. +func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in, out, s) +} + +func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_core_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. 
+func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*core.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*core.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*core.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*core.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*core.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*core.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*core.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*core.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*core.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_v1_VolumeSource_To_core_VolumeSource is an autogenerated conversion function. 
+func Convert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_core_VolumeSource(in, out, s) +} + +func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_core_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. +func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + return autoConvert_core_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. +func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. 
+func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go index 9779d2a1..c1ad46cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go @@ -46,8 +46,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error { }) scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) }) scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) }) - scheme.AddTypeDefaultingFunc(&v1.PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*v1.PodAttachOptions)) }) - scheme.AddTypeDefaultingFunc(&v1.PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*v1.PodExecOptions)) }) scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) }) scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) }) scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) }) @@ -140,7 +138,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD) } if in.Spec.PersistentVolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + SetDefaults_ISCSIPersistentVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) } if in.Spec.PersistentVolumeSource.AzureDisk != nil { SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) @@ -308,14 +306,6 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } } -func SetObjectDefaults_PodAttachOptions(in *v1.PodAttachOptions) { - SetDefaults_PodAttachOptions(in) -} - -func SetObjectDefaults_PodExecOptions(in *v1.PodExecOptions) { - SetDefaults_PodExecOptions(in) -} - func SetObjectDefaults_PodList(in *v1.PodList) { for i := range in.Items { a := &in.Items[i] diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/api/validation/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go index 30f541de..0c1cfaab 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package validation has functions for validating the correctness of api // objects and explaining what is wrong with them when they aren't valid. 
-package validation // import "k8s.io/kubernetes/pkg/api/validation"
+package validation // import "k8s.io/kubernetes/pkg/apis/core/validation"
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go
new file mode 100644
index 00000000..ab265bd7
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package validation
+
+import (
+	"fmt"
+	"time"
+
+	"k8s.io/apimachinery/pkg/api/meta"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/util/validation"
+	"k8s.io/apimachinery/pkg/util/validation/field"
+	"k8s.io/kubernetes/pkg/api/legacyscheme"
+	"k8s.io/kubernetes/pkg/apis/core"
+)
+
+const (
+	ReportingInstanceLengthLimit = 128
+	ActionLengthLimit            = 128
+	ReasonLengthLimit            = 128
+	NoteLengthLimit              = 1024
+)
+
+// ValidateEvent makes sure that the event makes sense.
+func ValidateEvent(event *core.Event) field.ErrorList {
+	allErrs := field.ErrorList{}
+	// Go has no literal for an unset time, so compare against the zero value.
+	zeroTime := time.Time{}
+
+	// "New" Events need to have EventTime set, so a zero EventTime means we are validating an old-style Event.
+	if event.EventTime.Time == zeroTime {
+		// Make sure event.Namespace and the involvedObject.Namespace agree
+		if len(event.InvolvedObject.Namespace) == 0 {
+			// event.Namespace must also be empty (or "default", for compatibility with old clients)
+			if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault {
+				allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
+			}
+		} else {
+			// event namespace must match
+			if event.Namespace != event.InvolvedObject.Namespace {
+				allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
+			}
+		}
+
+	} else {
+		if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceSystem {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace"))
+		}
+		if len(event.ReportingController) == 0 {
+			allErrs = append(allErrs, field.Required(field.NewPath("reportingController"), ""))
+		}
+		for _, msg := range validation.IsQualifiedName(event.ReportingController) {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("reportingController"), event.ReportingController, msg))
+		}
+		if len(event.ReportingInstance) == 0 {
+			allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), ""))
+		}
+		if len(event.ReportingInstance) > ReportingInstanceLengthLimit {
+			allErrs = append(allErrs, field.Invalid(field.NewPath("reportingInstance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit)))
+		}
+		if len(event.Action) == 0 {
+			allErrs = append(allErrs,
field.Required(field.NewPath("action"), "")) + } + if len(event.Action) > ActionLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit))) + } + if len(event.Reason) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reason"), "")) + } + if len(event.Reason) > ReasonLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit))) + } + if len(event.Message) > NoteLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit))) + } + } + + // For kinds we recognize, make sure InvolvedObject.Namespace is set for namespaced kinds + if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { + if namespaced && len(event.InvolvedObject.Namespace) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) + } + if !namespaced && len(event.InvolvedObject.Namespace) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) + } + } + + for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { + allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) + } + return allErrs +} + +// Check whether the kind in groupVersion is scoped at the root of the api hierarchy +func isNamespacedKind(kind, groupVersion string) (bool, error) { + gv, err := schema.ParseGroupVersion(groupVersion) + if err != nil { + return false, err + } + g, err := legacyscheme.Registry.Group(gv.Group) + if err != nil { + return false, err + } + + restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version) + if err != nil { + return false, err + } + scopeName := restMapping.Scope.Name() + if scopeName == meta.RESTScopeNameNamespace { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/api/validation/validation.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index 6ab51788..655d0bf2 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -33,7 +33,6 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" - genericvalidation "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/labels" @@ -43,31 +42,33 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" - "k8s.io/kubernetes/pkg/api/legacyscheme" apiservice "k8s.io/kubernetes/pkg/api/service" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/kubernetes/pkg/apis/core" + 
"k8s.io/kubernetes/pkg/apis/core/helper" + podshelper "k8s.io/kubernetes/pkg/apis/core/pods" + corev1 "k8s.io/kubernetes/pkg/apis/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/security/apparmor" ) // TODO: delete this global variable when we enable the validation of common // fields by default. -var RepairMalformedUpdates bool = genericvalidation.RepairMalformedUpdates +var RepairMalformedUpdates bool = apimachineryvalidation.RepairMalformedUpdates const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg const isInvalidQuotaResource string = `must be a standard resource for quota` -const fieldImmutableErrorMsg string = genericvalidation.FieldImmutableErrorMsg +const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg const isNotIntegerErrorMsg string = `must be an integer` +const isNotPositiveErrorMsg string = `must be greater than zero` var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) -var volumeModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" +var fileModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" // BannedOwners is a black list of object that are not allowed to be owners. -var BannedOwners = genericvalidation.BannedOwners +var BannedOwners = apimachineryvalidation.BannedOwners var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`) var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`) @@ -91,7 +92,7 @@ func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expected // ValidateAnnotations validates that a set of annotations are correctly defined. func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateAnnotations(annotations, fldPath) + return apimachineryvalidation.ValidateAnnotations(annotations, fldPath) } func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { @@ -111,37 +112,37 @@ func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList return allErrs } -func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { +func ValidatePodSpecificAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if value, isMirror := annotations[api.MirrorPodAnnotationKey]; isMirror { + if value, isMirror := annotations[core.MirrorPodAnnotationKey]; isMirror { if len(spec.NodeName) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) } } - if annotations[api.TolerationsAnnotationKey] != "" { + if annotations[core.TolerationsAnnotationKey] != "" { allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) } allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) 
- sysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.SysctlsPodAnnotationKey]) + sysctls, err := helper.SysctlsFromPodAnnotation(annotations[core.SysctlsPodAnnotationKey]) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.SysctlsPodAnnotationKey), annotations[api.SysctlsPodAnnotationKey], err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.SysctlsPodAnnotationKey), annotations[core.SysctlsPodAnnotationKey], err.Error())) } else { - allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(api.SysctlsPodAnnotationKey))...) + allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(core.SysctlsPodAnnotationKey))...) } - unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.UnsafeSysctlsPodAnnotationKey]) + unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[core.UnsafeSysctlsPodAnnotationKey]) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), annotations[api.UnsafeSysctlsPodAnnotationKey], err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.UnsafeSysctlsPodAnnotationKey), annotations[core.UnsafeSysctlsPodAnnotationKey], err.Error())) } else { - allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(api.UnsafeSysctlsPodAnnotationKey))...) + allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(core.UnsafeSysctlsPodAnnotationKey))...) } inBoth := sysctlIntersection(sysctls, unsafeSysctls) if len(inBoth) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) } return allErrs @@ -153,18 +154,18 @@ func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath, core.TolerationsAnnotationKey, err.Error())) return allErrs } if len(tolerations) > 0 { - allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...) + allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(core.TolerationsAnnotationKey))...) 
} return allErrs } -func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList { +func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *core.Pod, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} newAnnotations := newPod.Annotations oldAnnotations := oldPod.Annotations @@ -175,7 +176,7 @@ func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *fiel if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations")) } - if k == api.MirrorPodAnnotationKey { + if k == core.MirrorPodAnnotationKey { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation")) } } @@ -187,7 +188,7 @@ func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *fiel if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations")) } - if k == api.MirrorPodAnnotationKey { + if k == core.MirrorPodAnnotationKey { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation")) } } @@ -201,7 +202,7 @@ func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath } func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateOwnerReferences(ownerReferences, fldPath) + return apimachineryvalidation.ValidateOwnerReferences(ownerReferences, fldPath) } // ValidateNameFunc validates that the provided name is valid for a given resource type. @@ -273,7 +274,7 @@ var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountNa var ValidateEndpointsName = NameIsDNSSubdomain // ValidateClusterName can be used to check whether the given cluster name is valid. -var ValidateClusterName = genericvalidation.ValidateClusterName +var ValidateClusterName = apimachineryvalidation.ValidateClusterName // ValidateClassName can be used to check whether the given class name is valid. // It is defined here to avoid import cycle between pkg/apis/storage/validation @@ -284,7 +285,7 @@ var ValidateClassName = NameIsDNSSubdomain // class name is valid. var ValidatePriorityClassName = NameIsDNSSubdomain -// TODO update all references to these functions to point to the genericvalidation ones +// TODO update all references to these functions to point to the apimachineryvalidation ones // NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. 
func NameIsDNSSubdomain(name string, prefix bool) []string { return apimachineryvalidation.NameIsDNSSubdomain(name, prefix) @@ -314,8 +315,17 @@ func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) f return allErrs } +// Validates that a Quantity is positive +func ValidatePositiveQuantityValue(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNotPositiveErrorMsg)) + } + return allErrs +} + func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateImmutableField(newVal, oldVal, fldPath) + return apimachineryvalidation.ValidateImmutableField(newVal, oldVal, fldPath) } func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList { @@ -332,7 +342,7 @@ func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string // It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. // TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) + allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) // run additional checks for the finalizer name for i := range meta.Finalizers { allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) @@ -342,7 +352,7 @@ func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn // ValidateObjectMetaUpdate validates an object's metadata when updated func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) + allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) // run additional checks for the finalizer name for i := range newMeta.Finalizers { allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) 
@@ -352,13 +362,14 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel } func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath) + return apimachineryvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath) } -func ValidateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { +func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) { allErrs := field.ErrorList{} allNames := sets.String{} + vols := make(map[string]core.VolumeSource) for i, vol := range volumes { idxPath := fldPath.Index(i) namePath := idxPath.Child("name") @@ -373,15 +384,72 @@ func ValidateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, fi } if len(el) == 0 { allNames.Insert(vol.Name) + vols[vol.Name] = vol.VolumeSource } else { allErrs = append(allErrs, el...) } } - return allNames, allErrs + return vols, allErrs } -func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { +func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool { + if _, ok := volumes[name]; ok { + return true + } else { + return false + } +} + +func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) { + if source, ok := volumes[name]; ok { + if source.PersistentVolumeClaim != nil { + return true, true + } else { + return true, false + } + } else { + return false, false + } +} + +func mountNameAlreadyExists(name string, devices map[string]string) bool { + if _, ok := devices[name]; ok { + return true + } else { + return false + } +} + +func mountPathAlreadyExists(mountPath string, devices map[string]string) bool { + for _, devPath := range devices { + if mountPath == devPath { + return true + } + } + + return false +} + +func deviceNameAlreadyExists(name string, mounts map[string]string) bool { + if _, ok := mounts[name]; ok { + return true + } else { + return false + } +} + +func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool { + for _, mountPath := range mounts { + if mountPath == devicePath { + return true + } + } + + return false +} + +func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { numVolumes := 0 allErrs := field.ErrorList{} if source.EmptyDir != nil { @@ -395,7 +463,7 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity")) } } - if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == api.StorageMediumHugePages { + if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == core.StorageMediumHugePages { allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("medium"), "HugePages medium is disabled by feature-gate for EmptyDir volumes")) } } @@ -621,7 +689,7 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName return allErrs } -func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { +func validateHostPathVolumeSource(hostPath *core.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if 
len(hostPath.Path) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) @@ -633,7 +701,7 @@ func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *f return allErrs } -func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { +func validateGitRepoVolumeSource(gitRepo *core.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(gitRepo.Repository) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) @@ -644,7 +712,47 @@ func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *fiel return allErrs } -func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { +func validateISCSIVolumeSource(iscsi *core.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } else { + if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format starting with iqn, eui, or naa")) + } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) + } + if iscsi.InitiatorName != nil { + initiator := *iscsi.InitiatorName + if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format starting with iqn, eui, or naa")) + } + if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + } + return allErrs +} + +func validateISCSIPersistentVolumeSource(iscsi *core.ISCSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(iscsi.TargetPortal) == 0 { allErrs = append(allErrs, 
field.Required(fldPath.Child("targetPortal"), "")) @@ -668,6 +776,11 @@ func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) } + if iscsi.SecretRef != nil { + if len(iscsi.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + } if iscsi.InitiatorName != nil { initiator := *iscsi.InitiatorName if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { @@ -684,7 +797,7 @@ func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path return allErrs } -func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList { +func validateFCVolumeSource(fc *core.FCVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 { allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both")) @@ -706,7 +819,7 @@ func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.E return allErrs } -func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { +func validateGCEPersistentDiskVolumeSource(pd *core.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(pd.PDName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) @@ -717,7 +830,7 @@ func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource return allErrs } -func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { +func validateAWSElasticBlockStoreVolumeSource(PD *core.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(PD.VolumeID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) @@ -728,7 +841,7 @@ func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolume return allErrs } -func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList { +func validateSecretVolumeSource(secretSource *core.SecretVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(secretSource.SecretName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) @@ -736,7 +849,7 @@ func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *f secretMode := secretSource.DefaultMode if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, fileModeErrorMsg)) } itemsPath := fldPath.Child("items") @@ -747,7 +860,7 @@ func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *f return allErrs } -func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { +func validateConfigMapVolumeSource(configMapSource *core.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(configMapSource.Name) == 0 { allErrs = append(allErrs, 
field.Required(fldPath.Child("name"), "")) @@ -755,7 +868,7 @@ func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, f configMapMode := configMapSource.DefaultMode if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, fileModeErrorMsg)) } itemsPath := fldPath.Child("items") @@ -766,7 +879,7 @@ func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, f return allErrs } -func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { +func validateKeyToPath(kp *core.KeyToPath, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(kp.Key) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) @@ -776,13 +889,13 @@ func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { } allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, fileModeErrorMsg)) } return allErrs } -func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { +func validatePersistentClaimVolumeSource(claim *core.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(claim.ClaimName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) @@ -790,7 +903,7 @@ func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeS return allErrs } -func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList { +func validateNFSVolumeSource(nfs *core.NFSVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(nfs.Server) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) @@ -804,7 +917,7 @@ func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) fiel return allErrs } -func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { +func validateQuobyteVolumeSource(quobyte *core.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(quobyte.Registry) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) @@ -822,7 +935,7 @@ func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *fiel return allErrs } -func validateGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { +func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(glusterfs.EndpointsName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) @@ -833,7 +946,7 @@ func validateGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, fldPath return allErrs } -func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { +func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { 
allErrs := field.ErrorList{} if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { //TODO: consider adding a RequiredOneOf() error for this and similar cases @@ -848,14 +961,14 @@ func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *fiel return allErrs } -var validDownwardAPIFieldPathExpressions = sets.NewString( +var validVolumeDownwardAPIFieldPathExpressions = sets.NewString( "metadata.name", "metadata.namespace", "metadata.labels", "metadata.annotations", "metadata.uid") -func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { +func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(file.Path) == 0 { @@ -863,7 +976,7 @@ func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *fie } allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) if file.FieldRef != nil { - allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validVolumeDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) if file.ResourceFieldRef != nil { allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) } @@ -873,18 +986,18 @@ func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *fie allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) } if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, fileModeErrorMsg)) } return allErrs } -func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { +func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} downwardAPIMode := downwardAPIVolume.DefaultMode if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, fileModeErrorMsg)) } for _, file := range downwardAPIVolume.Items { @@ -893,7 +1006,7 @@ func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSou return allErrs } -func validateProjectionSources(projection *api.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { +func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allPaths := sets.String{} @@ -969,12 +1082,12 @@ func validateProjectionSources(projection *api.ProjectedVolumeSource, projection return allErrs } -func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { +func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} projectionMode := projection.DefaultMode if projectionMode != nil && (*projectionMode > 0777 || 
*projectionMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, fileModeErrorMsg)) } allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...) @@ -982,16 +1095,16 @@ func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPat } var supportedHostPathTypes = sets.NewString( - string(api.HostPathUnset), - string(api.HostPathDirectoryOrCreate), - string(api.HostPathDirectory), - string(api.HostPathFileOrCreate), - string(api.HostPathFile), - string(api.HostPathSocket), - string(api.HostPathCharDev), - string(api.HostPathBlockDev)) + string(core.HostPathUnset), + string(core.HostPathDirectoryOrCreate), + string(core.HostPathDirectory), + string(core.HostPathFileOrCreate), + string(core.HostPathFile), + string(core.HostPathSocket), + string(core.HostPathCharDev), + string(core.HostPathBlockDev)) -func validateHostPathType(hostPathType *api.HostPathType, fldPath *field.Path) field.ErrorList { +func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) { @@ -1033,7 +1146,7 @@ func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.Error // validateMountPropagation verifies that MountPropagation field is valid and // allowed for given container. -func validateMountPropagation(mountPropagation *api.MountPropagationMode, container *api.Container, fldPath *field.Path) field.ErrorList { +func validateMountPropagation(mountPropagation *core.MountPropagationMode, container *core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if mountPropagation == nil { @@ -1044,7 +1157,7 @@ func validateMountPropagation(mountPropagation *api.MountPropagationMode, contai return allErrs } - supportedMountPropagations := sets.NewString(string(api.MountPropagationBidirectional), string(api.MountPropagationHostToContainer)) + supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer)) if !supportedMountPropagations.Has(string(*mountPropagation)) { allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List())) } @@ -1057,7 +1170,7 @@ func validateMountPropagation(mountPropagation *api.MountPropagationMode, contai } privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged - if *mountPropagation == api.MountPropagationBidirectional && !privileged { + if *mountPropagation == core.MountPropagationBidirectional && !privileged { allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers")) } return allErrs @@ -1077,7 +1190,7 @@ func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field. 
return allErrs } -func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList { +func validateRBDVolumeSource(rbd *core.RBDVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(rbd.CephMonitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1088,7 +1201,7 @@ func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) fiel return allErrs } -func validateRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateRBDPersistentVolumeSource(rbd *core.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(rbd.CephMonitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1099,7 +1212,7 @@ func validateRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, fldPa return allErrs } -func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList { +func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cd.VolumeID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) @@ -1107,7 +1220,7 @@ func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) return allErrs } -func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { +func validateCephFSVolumeSource(cephfs *core.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cephfs.Monitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1115,7 +1228,7 @@ func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.P return allErrs } -func validateCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateCephFSPersistentVolumeSource(cephfs *core.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cephfs.Monitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1123,7 +1236,7 @@ func validateCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSour return allErrs } -func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList { +func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fv.Driver) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) @@ -1144,7 +1257,7 @@ func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) fie return allErrs } -func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { +func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if azure.SecretName == "" { allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) @@ -1155,7 +1268,7 @@ func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) fi return allErrs } -func validateAzureFilePV(azure *api.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if azure.SecretName == "" { allErrs = append(allErrs, 
field.Required(fldPath.Child("secretName"), "")) @@ -1171,9 +1284,9 @@ func validateAzureFilePV(azure *api.AzureFilePersistentVolumeSource, fldPath *fi return allErrs } -func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { - var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite)) - var supportedDiskKinds = sets.NewString(string(api.AzureSharedBlobDisk), string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk)) +func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { + var supportedCachingModes = sets.NewString(string(core.AzureDataDiskCachingNone), string(core.AzureDataDiskCachingReadOnly), string(core.AzureDataDiskCachingReadWrite)) + var supportedDiskKinds = sets.NewString(string(core.AzureSharedBlobDisk), string(core.AzureDedicatedBlobDisk), string(core.AzureManagedDisk)) diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"} diskUriSupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"} @@ -1196,18 +1309,18 @@ func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) fi } // validate that DiskUri is the correct format - if azure.Kind != nil && *azure.Kind == api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { + if azure.Kind != nil && *azure.Kind == core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged)) } - if azure.Kind != nil && *azure.Kind != api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { + if azure.Kind != nil && *azure.Kind != core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob)) } return allErrs } -func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { +func validateVsphereVolumeSource(cd *core.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cd.VolumePath) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) @@ -1215,7 +1328,7 @@ func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath return allErrs } -func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { +func validatePhotonPersistentDiskVolumeSource(cd *core.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cd.PdID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), "")) @@ -1223,7 +1336,7 @@ func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolume return allErrs } -func validatePortworxVolumeSource(pwx *api.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { +func validatePortworxVolumeSource(pwx *core.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(pwx.VolumeID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) @@ -1231,7 +1344,7 @@ func validatePortworxVolumeSource(pwx *api.PortworxVolumeSource, fldPath *field. 
return allErrs } -func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { +func validateScaleIOVolumeSource(sio *core.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if sio.Gateway == "" { allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) @@ -1245,7 +1358,7 @@ func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Pa return allErrs } -func validateScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateScaleIOPersistentVolumeSource(sio *core.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if sio.Gateway == "" { allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) @@ -1259,7 +1372,7 @@ func validateScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSourc return allErrs } -func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) field.ErrorList { +func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ls.Path == "" { allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) @@ -1270,7 +1383,7 @@ func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) f return allErrs } -func validateStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { +func validateStorageOSVolumeSource(storageos *core.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(storageos.VolumeName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) @@ -1288,7 +1401,7 @@ func validateStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, fldPath return allErrs } -func validateStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(storageos.VolumeName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) @@ -1309,15 +1422,35 @@ func validateStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentV return allErrs } +func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath, "CSIPersistentVolume disabled by feature-gate")) + } + + if len(csi.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + if len(csi.VolumeHandle) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), "")) + } + + return allErrs +} + // ValidatePersistentVolumeName checks that a name is appropriate for a // PersistentVolumeName object. 
 var ValidatePersistentVolumeName = NameIsDNSSubdomain

-var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany))
+var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany))

-var supportedReclaimPolicy = sets.NewString(string(api.PersistentVolumeReclaimDelete), string(api.PersistentVolumeReclaimRecycle), string(api.PersistentVolumeReclaimRetain))
+var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain))

-func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
+var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem))
+
+func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList {
 	metaPath := field.NewPath("metadata")
 	allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath)
@@ -1335,12 +1468,13 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
 		allErrs = append(allErrs, field.Required(specPath.Child("capacity"), ""))
 	}
-	if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 {
-		allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)}))
+	if _, ok := pv.Spec.Capacity[core.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 {
+		allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(core.ResourceStorage)}))
 	}
 	capPath := specPath.Child("capacity")
 	for r, qty := range pv.Spec.Capacity {
 		allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...)
+		allErrs = append(allErrs, ValidatePositiveQuantityValue(qty, capPath.Key(string(r)))...)
 	}
 	if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 {
 		if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) {
@@ -1429,7 +1563,7 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
 			allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type"))
 		} else {
 			numVolumes++
-			allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...)
+			allErrs = append(allErrs, validateISCSIPersistentVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...)
 		}
 		if pv.Spec.ISCSI.InitiatorName != nil && len(pv.ObjectMeta.Name+":"+pv.Spec.ISCSI.TargetPortal) > 64 {
 			tooLongErr := "Total length of : must be under 64 characters if iscsi.initiatorName is specified."
 			allErrs = append(allErrs, field.Invalid(metaPath.Child("name"), pv.ObjectMeta.Name, tooLongErr))
@@ -1531,12 +1665,21 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList {
 		}
 	}
+	if pv.Spec.CSI != nil {
+		if numVolumes > 0 {
+			allErrs = append(allErrs, field.Forbidden(specPath.Child("csi"), "may not specify more than 1 volume type"))
+		} else {
+			numVolumes++
+			allErrs = append(allErrs, validateCSIPersistentVolumeSource(pv.Spec.CSI, specPath.Child("csi"))...)
+ } + } + if numVolumes == 0 { allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) } // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy - if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { + if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == core.PersistentVolumeReclaimRecycle { allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'")) } @@ -1545,13 +1688,17 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { allErrs = append(allErrs, field.Invalid(specPath.Child("storageClassName"), pv.Spec.StorageClassName, msg)) } } - + if pv.Spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("volumeMode"), "PersistentVolume volumeMode is disabled by feature-gate")) + } else if pv.Spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*pv.Spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("volumeMode"), *pv.Spec.VolumeMode, supportedVolumeModes.List())) + } return allErrs } // ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. // newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { +func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { allErrs := field.ErrorList{} allErrs = ValidatePersistentVolume(newPv) @@ -1561,12 +1708,17 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.Er } newPv.Status = oldPv.Status + + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + return allErrs } // ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. // newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { +func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) if len(newPv.ResourceVersion) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) @@ -1576,14 +1728,14 @@ func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) fi } // ValidatePersistentVolumeClaim validates a PersistentVolumeClaim -func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList { +func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...) 
return allErrs } // ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec -func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { +func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(spec.AccessModes) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required")) @@ -1592,15 +1744,16 @@ func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldP allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) } for _, mode := range spec.AccessModes { - if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany { + if mode != core.ReadWriteOnce && mode != core.ReadOnlyMany && mode != core.ReadWriteMany { allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) } } - storageValue, ok := spec.Resources.Requests[api.ResourceStorage] + storageValue, ok := spec.Resources.Requests[core.ResourceStorage] if !ok { - allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(api.ResourceStorage)), "")) + allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), "")) } else { - allErrs = append(allErrs, ValidateResourceQuantityValue(string(api.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(api.ResourceStorage)))...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) } if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { @@ -1608,11 +1761,16 @@ func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldP allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg)) } } + if spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "PersistentVolumeClaim volumeMode is disabled by feature-gate")) + } else if spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, supportedVolumeModes.List())) + } return allErrs } // ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim -func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { +func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) // PVController needs to update PVC.Spec w/ VolumeName. @@ -1628,7 +1786,7 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeCla newPVCSpecCopy := newPvc.Spec.DeepCopy() // lets make sure storage values are same. 
- if newPvc.Status.Phase == api.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { + if newPvc.Status.Phase == core.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] } @@ -1654,12 +1812,16 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeCla // TODO: remove Beta when no longer needed allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...) + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + newPvc.Status = oldPvc.Status return allErrs } // ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim -func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { +func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) if len(newPvc.ResourceVersion) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) @@ -1679,9 +1841,9 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVol return allErrs } -var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP)) +var supportedPortProtocols = sets.NewString(string(core.ProtocolTCP), string(core.ProtocolUDP)) -func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList { +func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allNames := sets.String{} @@ -1720,7 +1882,7 @@ func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) fiel } // ValidateEnv validates env vars -func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { +func ValidateEnv(vars []core.EnvVar, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ev := range vars { @@ -1737,10 +1899,17 @@ func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { return allErrs } -var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP") +var validEnvDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.serviceAccountName", + "status.hostIP", + "status.podIP") var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") -func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { +func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ev.ValueFrom == nil { @@ -1751,7 +1920,7 @@ func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList if ev.ValueFrom.FieldRef != nil { numSources++ - allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, 
fldPath.Child("fieldRef"))...) + allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) } if ev.ValueFrom.ResourceFieldRef != nil { numSources++ @@ -1779,20 +1948,40 @@ func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList return allErrs } -func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { +func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fs.APIVersion) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) - } else if len(fs.FieldPath) == 0 { + return allErrs + } + if len(fs.FieldPath) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) - } else { - internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) - } else if !expressions.Has(internalFieldPath) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List())) + return allErrs + } + + internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "") + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) + return allErrs + } + + if path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(internalFieldPath); ok { + switch path { + case "metadata.annotations": + for _, msg := range validation.IsQualifiedName(strings.ToLower(subscript)) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + case "metadata.labels": + for _, msg := range validation.IsQualifiedName(subscript) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript")) } + } else if !expressions.Has(path) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, expressions.List())) + return allErrs } return allErrs @@ -1805,7 +1994,7 @@ func fsResourceIsEphemeralStorage(resource string) bool { return false } -func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { +func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { allErrs := field.ErrorList{} if volume && len(fs.ContainerName) == 0 { @@ -1821,7 +2010,7 @@ func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expre return allErrs } -func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorList { +func ValidateEnvFrom(vars []core.EnvFromSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ev := range vars { idxPath := fldPath.Index(i) @@ -1850,7 +2039,7 @@ func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorL return allErrs } -func validateConfigMapEnvSource(configMapSource *api.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList { +func validateConfigMapEnvSource(configMapSource *core.ConfigMapEnvSource, fldPath 
*field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(configMapSource.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -1862,7 +2051,7 @@ func validateConfigMapEnvSource(configMapSource *api.ConfigMapEnvSource, fldPath return allErrs } -func validateSecretEnvSource(secretSource *api.SecretEnvSource, fldPath *field.Path) field.ErrorList { +func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(secretSource.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -1901,7 +2090,7 @@ func validateContainerResourceDivisor(rName string, divisor resource.Quantity, f return allErrs } -func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { +func validateConfigMapKeySelector(s *core.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} nameFn := ValidateNameFunc(ValidateSecretName) @@ -1919,7 +2108,7 @@ func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Pa return allErrs } -func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList { +func validateSecretKeySelector(s *core.SecretKeySelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} nameFn := ValidateNameFunc(ValidateSecretName) @@ -1937,7 +2126,27 @@ func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) fi return allErrs } -func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, container *api.Container, fldPath *field.Path) field.ErrorList { +func GetVolumeMountMap(mounts []core.VolumeMount) map[string]string { + volmounts := make(map[string]string) + + for _, mnt := range mounts { + volmounts[mnt.Name] = mnt.MountPath + } + + return volmounts +} + +func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string { + voldevices := make(map[string]string) + + for _, dev := range devices { + voldevices[dev.Name] = dev.DevicePath + } + + return voldevices +} + +func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} mountpoints := sets.NewString() @@ -1945,7 +2154,8 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain idxPath := fldPath.Index(i) if len(mnt.Name) == 0 { allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !volumes.Has(mnt.Name) { + } + if !IsMatchedVolume(mnt.Name, volumes) { allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) } if len(mnt.MountPath) == 0 { @@ -1954,14 +2164,16 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain if mountpoints.Has(mnt.MountPath) { allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) } - if !path.IsAbs(mnt.MountPath) { - // also allow windows absolute path - p := mnt.MountPath - if len(p) < 2 || ((p[0] < 'A' || p[0] > 'Z') && (p[0] < 'a' || p[0] > 'z')) || p[1] != ':' { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be an absolute path")) - } - } mountpoints.Insert(mnt.MountPath) + + // check for overlap with VolumeDevice + if mountNameAlreadyExists(mnt.Name, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), mnt.Name, "must not already exist 
in volumeDevices")) + } + if mountPathAlreadyExists(mnt.MountPath, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices")) + } + if len(mnt.SubPath) > 0 { allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) } @@ -1973,7 +2185,61 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain return allErrs } -func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { +func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + devicepath := sets.NewString() + devicename := sets.NewString() + + if devices != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeDevices"), "Container volumeDevices is disabled by feature-gate")) + return allErrs + } + if devices != nil { + for i, dev := range devices { + idxPath := fldPath.Index(i) + devName := dev.Name + devPath := dev.DevicePath + didMatch, isPVC := isMatchedDevice(devName, volumes) + if len(devName) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if devicename.Has(devName) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique")) + } + // Must be PersistentVolumeClaim volume source + if didMatch && !isPVC { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode")) + } + if !didMatch { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName)) + } + if len(devPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), "")) + } + if devicepath.Has(devPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique")) + } + if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')")) + } else { + devicepath.Insert(devPath) + } + // check for overlap with VolumeMount + if deviceNameAlreadyExists(devName, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts")) + } + if devicePathAlreadyExists(devPath, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts")) + } + if len(devName) > 0 { + devicename.Insert(devName) + } + } + } + return allErrs +} + +func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { @@ -1989,18 +2255,18 @@ func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { return allErrs } -func validateClientIPAffinityConfig(config *api.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { +func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if config == nil { - allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) + allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", 
core.ServiceAffinityClientIP))) return allErrs } if config.ClientIP == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) return allErrs } if config.ClientIP.TimeoutSeconds == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) return allErrs } allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...) @@ -2010,15 +2276,15 @@ func validateClientIPAffinityConfig(config *api.SessionAffinityConfig, fldPath * func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if *timeout <= 0 || *timeout > api.MaxClientIPServiceAffinitySeconds { - allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", api.MaxClientIPServiceAffinitySeconds))) + if *timeout <= 0 || *timeout > core.MaxClientIPServiceAffinitySeconds { + allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", core.MaxClientIPServiceAffinitySeconds))) } return allErrs } // AccumulateUniqueHostPorts extracts each HostPort of each Container, // accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { +func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for ci, ctr := range containers { @@ -2043,12 +2309,12 @@ func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.Str // checkHostPortConflicts checks for colliding Port.HostPort values across // a slice of containers. 
-func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList { +func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList { allPorts := sets.String{} return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) } -func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList { +func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if len(exec.Command) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) @@ -2056,9 +2322,9 @@ func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorLi return allErrors } -var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) +var supportedHTTPSchemes = sets.NewString(string(core.URISchemeHTTP), string(core.URISchemeHTTPS)) -func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList { +func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if len(http.Path) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) @@ -2091,11 +2357,11 @@ func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.E return allErrs } -func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList { +func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList { return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) } -func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList { +func validateHandler(handler *core.Handler, fldPath *field.Path) field.ErrorList { numHandlers := 0 allErrors := field.ErrorList{} if handler.Exec != nil { @@ -2128,7 +2394,7 @@ func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList return allErrors } -func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList { +func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if lifecycle.PostStart != nil { allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) 
@@ -2139,13 +2405,13 @@ func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.Erro return allErrs } -var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever)) +var supportedPullPolicies = sets.NewString(string(core.PullAlways), string(core.PullIfNotPresent), string(core.PullNever)) -func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList { +func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} switch policy { - case api.PullAlways, api.PullIfNotPresent, api.PullNever: + case core.PullAlways, core.PullIfNotPresent, core.PullNever: break case "": allErrors = append(allErrors, field.Required(fldPath, "")) @@ -2156,10 +2422,10 @@ func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorL return allErrors } -func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { +func validateInitContainers(containers, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList if len(containers) > 0 { - allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) + allErrs = append(allErrs, validateContainers(containers, deviceVolumes, fldPath)...) } allNames := sets.String{} @@ -2187,7 +2453,7 @@ func validateInitContainers(containers, otherContainers []api.Container, volumes return allErrs } -func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { +func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(containers) == 0 { @@ -2198,6 +2464,9 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath for i, ctr := range containers { idxPath := fldPath.Index(i) namePath := idxPath.Child("name") + volMounts := GetVolumeMountMap(ctr.VolumeMounts) + volDevices := GetVolumeDeviceMap(ctr.VolumeDevices) + if len(ctr.Name) == 0 { allErrs = append(allErrs, field.Required(namePath, "")) } else { @@ -2224,7 +2493,7 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath } switch ctr.TerminationMessagePolicy { - case api.TerminationMessageReadFile, api.TerminationMessageFallbackToLogsOnError: + case core.TerminationMessageReadFile, core.TerminationMessageFallbackToLogsOnError: case "": allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'")) default: @@ -2235,7 +2504,8 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"))...) allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...) - allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volumes, &ctr, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, &ctr, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, idxPath.Child("volumeDevices"))...) allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) 
 		allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...)
 		allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...)
@@ -2246,36 +2516,101 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath
 	return allErrs
 }

-func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList {
+func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Path) field.ErrorList {
 	allErrors := field.ErrorList{}
 	switch *restartPolicy {
-	case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever:
+	case core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever:
 		break
 	case "":
 		allErrors = append(allErrors, field.Required(fldPath, ""))
 	default:
-		validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)}
+		validValues := []string{string(core.RestartPolicyAlways), string(core.RestartPolicyOnFailure), string(core.RestartPolicyNever)}
 		allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues))
 	}
 	return allErrors
 }

-func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList {
+func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList {
 	allErrors := field.ErrorList{}
 	switch *dnsPolicy {
-	case api.DNSClusterFirstWithHostNet, api.DNSClusterFirst, api.DNSDefault:
-		break
+	case core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault:
+	case core.DNSNone:
+		if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) {
+			allErrors = append(allErrors, field.Invalid(fldPath, dnsPolicy, "DNSPolicy: can not use 'None', custom pod DNS is disabled by feature gate"))
+		}
 	case "":
 		allErrors = append(allErrors, field.Required(fldPath, ""))
 	default:
-		validValues := []string{string(api.DNSClusterFirstWithHostNet), string(api.DNSClusterFirst), string(api.DNSDefault)}
+		validValues := []string{string(core.DNSClusterFirstWithHostNet), string(core.DNSClusterFirst), string(core.DNSDefault)}
+		if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) {
+			validValues = append(validValues, string(core.DNSNone))
+		}
 		allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues))
 	}
 	return allErrors
 }

-func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList {
+const (
+	// Limits on various DNS parameters. These are derived from
+	// restrictions in Linux libc name resolution handling.
+	// Max number of DNS name servers.
+	MaxDNSNameservers = 3
+	// Max number of domains in search path.
+	MaxDNSSearchPaths = 6
+	// Max number of characters in search path.
+	MaxDNSSearchListChars = 256
+)
+
+func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList {
+	allErrs := field.ErrorList{}
+
+	// Validate DNSNone case. Must provide at least one DNS name server.
+	if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) && dnsPolicy != nil && *dnsPolicy == core.DNSNone {
+		if dnsConfig == nil {
+			return append(allErrs, field.Required(fldPath, fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", core.DNSNone)))
+		}
+		if len(dnsConfig.Nameservers) == 0 {
+			return append(allErrs, field.Required(fldPath.Child("nameservers"), fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", core.DNSNone)))
+		}
+	}
+
+	if dnsConfig != nil {
+		if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) {
+			return append(allErrs, field.Forbidden(fldPath, "DNSConfig: custom pod DNS is disabled by feature gate"))
+		}
+
+		// Validate nameservers.
+		if len(dnsConfig.Nameservers) > MaxDNSNameservers {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers)))
+		}
+		for i, ns := range dnsConfig.Nameservers {
+			if ip := net.ParseIP(ns); ip == nil {
+				allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address"))
+			}
+		}
+		// Validate searches.
+		if len(dnsConfig.Searches) > MaxDNSSearchPaths {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths)))
+		}
+		// Include the space between search paths.
+		if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars {
+			allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, "must not have more than 256 characters (including spaces) in the search list"))
+		}
+		for i, search := range dnsConfig.Searches {
+			allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...)
+		}
+		// Validate options.
+		for i, option := range dnsConfig.Options {
+			if len(option.Name) == 0 {
+				allErrs = append(allErrs, field.Required(fldPath.Child("options").Index(i), "must not be empty"))
+			}
+		}
+	}
+	return allErrs
+}
+
+func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList {
 	allErrors := field.ErrorList{}
 	if hostNetwork {
 		for i, container := range containers {
@@ -2295,11 +2630,11 @@ func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *
 // formed. Right now, we only expect name to be set (it's the only field). If
 // this ever changes and someone decides to set those fields, we'd like to
 // know.
-func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList { +func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} for i, currPullSecret := range imagePullSecrets { idxPath := fldPath.Index(i) - strippedRef := api.LocalObjectReference{Name: currPullSecret.Name} + strippedRef := core.LocalObjectReference{Name: currPullSecret.Name} if !reflect.DeepEqual(strippedRef, currPullSecret) { allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) } @@ -2308,7 +2643,7 @@ func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPa } // validateAffinity checks if given affinities are valid -func validateAffinity(affinity *api.Affinity, fldPath *field.Path) field.ErrorList { +func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if affinity != nil { @@ -2337,7 +2672,7 @@ func validateAffinity(affinity *api.Affinity, fldPath *field.Path) field.ErrorLi return allErrs } -func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { +func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { if !allowEmpty && len(*effect) == 0 { return field.ErrorList{field.Required(fldPath, "")} } @@ -2345,15 +2680,15 @@ func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *fiel allErrors := field.ErrorList{} switch *effect { // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. - case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoExecute: - // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoExecute: + case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute: + // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute: default: validValues := []string{ - string(api.TaintEffectNoSchedule), - string(api.TaintEffectPreferNoSchedule), - string(api.TaintEffectNoExecute), + string(core.TaintEffectNoSchedule), + string(core.TaintEffectPreferNoSchedule), + string(core.TaintEffectNoExecute), // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. - // string(api.TaintEffectNoScheduleNoAdmit), + // string(core.TaintEffectNoScheduleNoAdmit), } allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) } @@ -2361,7 +2696,7 @@ func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *fiel } // validateOnlyAddedTolerations validates updated pod tolerations. 
-func validateOnlyAddedTolerations(newTolerations []api.Toleration, oldTolerations []api.Toleration, fldPath *field.Path) field.ErrorList { +func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldTolerations []core.Toleration, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, old := range oldTolerations { found := false @@ -2383,7 +2718,7 @@ func validateOnlyAddedTolerations(newTolerations []api.Toleration, oldToleration return allErrs } -func ValidateHostAliases(hostAliases []api.HostAlias, fldPath *field.Path) field.ErrorList { +func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, hostAlias := range hostAliases { if ip := net.ParseIP(hostAlias.IP); ip == nil { @@ -2397,7 +2732,7 @@ func ValidateHostAliases(hostAliases []api.HostAlias, fldPath *field.Path) field } // ValidateTolerations tests if given tolerations have valid data. -func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList { +func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} for i, toleration := range tolerations { idxPath := fldPath.Index(i) @@ -2407,12 +2742,12 @@ func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) fiel } // empty toleration key with Exists operator and empty value means match all taints - if len(toleration.Key) == 0 && toleration.Operator != api.TolerationOpExists { + if len(toleration.Key) == 0 && toleration.Operator != core.TolerationOpExists { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) } - if toleration.TolerationSeconds != nil && toleration.Effect != api.TaintEffectNoExecute { + if toleration.TolerationSeconds != nil && toleration.Effect != core.TaintEffectNoExecute { allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, "effect must be 'NoExecute' when `tolerationSeconds` is set")) } @@ -2420,16 +2755,16 @@ func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) fiel // validate toleration operator and value switch toleration.Operator { // empty operator means Equal - case api.TolerationOpEqual, "": + case core.TolerationOpEqual, "": if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) } - case api.TolerationOpExists: + case core.TolerationOpExists: if len(toleration.Value) > 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) } default: - validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)} + validValues := []string{string(core.TolerationOpEqual), string(core.TolerationOpExists)} allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) } @@ -2441,15 +2776,15 @@ func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) fiel return allErrors } -func toResourceNames(resources api.ResourceList) []api.ResourceName { - result := []api.ResourceName{} +func toResourceNames(resources core.ResourceList) []core.ResourceName { + result := []core.ResourceName{} for resourceName := range resources { result = append(result, resourceName) } return 
result } -func toSet(resourceNames []api.ResourceName) sets.String { +func toSet(resourceNames []core.ResourceName) sets.String { result := sets.NewString() for _, resourceName := range resourceNames { result.Insert(string(resourceName)) @@ -2457,7 +2792,7 @@ func toSet(resourceNames []api.ResourceName) sets.String { return result } -func toContainerResourcesSet(ctr *api.Container) sets.String { +func toContainerResourcesSet(ctr *core.Container) sets.String { resourceNames := toResourceNames(ctr.Resources.Requests) resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...) return toSet(resourceNames) @@ -2465,7 +2800,7 @@ func toContainerResourcesSet(ctr *api.Container) sets.String { // validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template // it only does additive validation of fields not covered in validateContainers -func validateContainersOnlyForPod(containers []api.Container, fldPath *field.Path) field.ErrorList { +func validateContainersOnlyForPod(containers []core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ctr := range containers { idxPath := fldPath.Index(i) @@ -2477,7 +2812,7 @@ func validateContainersOnlyForPod(containers []api.Container, fldPath *field.Pat } // ValidatePod tests if required fields in the pod are set. -func ValidatePod(pod *api.Pod) field.ErrorList { +func ValidatePod(pod *core.Pod) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...) @@ -2512,19 +2847,20 @@ func ValidatePod(pod *api.Pod) field.ErrorList { // This includes checking formatting and uniqueness. It also canonicalizes the // structure by setting default values and implementing any backwards-compatibility // tricks. -func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { +func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - allVolumes, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) + vols, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) allErrs = append(allErrs, vErrs...) - allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) - allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) + allErrs = append(allErrs, validateContainers(spec.Containers, vols, fldPath.Child("containers"))...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"))...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...) 
+ allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"))...) if len(spec.ServiceAccountName) > 0 { for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) @@ -2578,19 +2914,19 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { } // ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data -func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} switch rq.Operator { - case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn: + case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn: if len(rq.Values) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) } - case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist: + case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist: if len(rq.Values) > 0 { allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } - case api.NodeSelectorOpGt, api.NodeSelectorOpLt: + case core.NodeSelectorOpGt, core.NodeSelectorOpLt: if len(rq.Values) != 1 { allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) } @@ -2602,7 +2938,7 @@ func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *fi } // ValidateNodeSelectorTerm tests that the specified node selector term has valid data -func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(term.MatchExpressions) == 0 { @@ -2615,7 +2951,7 @@ func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) fi } // ValidateNodeSelector tests that the specified nodeSelector fields has valid data -func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} termFldPath := fldPath.Child("nodeSelectorTerms") @@ -2636,18 +2972,18 @@ func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath * v1Avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) return allErrs } - var avoids api.AvoidPods - if err := k8s_api_v1.Convert_v1_AvoidPods_To_api_AvoidPods(&v1Avoids, &avoids, nil); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) + var avoids core.AvoidPods + if err := corev1.Convert_v1_AvoidPods_To_core_AvoidPods(&v1Avoids, &avoids, nil); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) return allErrs } if len(avoids.PreferAvoidPods) != 0 { for 
i, pa := range avoids.PreferAvoidPods { - idxPath := fldPath.Child(api.PreferAvoidPodsAnnotationKey).Index(i) + idxPath := fldPath.Child(core.PreferAvoidPodsAnnotationKey).Index(i) allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) } } @@ -2656,7 +2992,7 @@ func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath * } // validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. -func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { +func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if avoidPodEntry.PodSignature.PodController == nil { allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) @@ -2671,7 +3007,7 @@ func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPat } // ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data -func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { +func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, term := range terms { @@ -2685,7 +3021,7 @@ func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPa } // validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data -func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, fldPath *field.Path) field.ErrorList { +func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) @@ -2701,7 +3037,7 @@ func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, fldPath *field } // validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data -func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, fldPath *field.Path) field.ErrorList { +func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, podAffinityTerm := range podAffinityTerms { allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...) 
@@ -2710,7 +3046,7 @@ func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, fldPath *f } // validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data -func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { +func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for j, weightedTerm := range weightedPodAffinityTerms { if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { @@ -2722,7 +3058,7 @@ func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPod } // validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data -func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { +func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { @@ -2741,7 +3077,7 @@ func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *fiel } // validatePodAffinity tests that the specified podAffinity fields have valid data -func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { +func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { @@ -2774,11 +3110,11 @@ func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if p, exists := annotations[api.SeccompPodAnnotationKey]; exists { - allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...) + if p, exists := annotations[core.SeccompPodAnnotationKey]; exists { + allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(core.SeccompPodAnnotationKey))...) } for k, p := range annotations { - if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) { + if strings.HasPrefix(k, core.SeccompContainerAnnotationKeyPrefix) { allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...) 
} } @@ -2786,7 +3122,7 @@ func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field return allErrs } -func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { +func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for k, p := range annotations { if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { @@ -2810,7 +3146,7 @@ func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.Pod return allErrs } -func podSpecHasContainer(spec *api.PodSpec, containerName string) bool { +func podSpecHasContainer(spec *core.PodSpec, containerName string) bool { for _, c := range spec.InitContainers { if c.Name == containerName { return true @@ -2846,7 +3182,7 @@ func IsValidSysctlName(name string) bool { return sysctlRegexp.MatchString(name) } -func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList { +func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, s := range sysctls { if len(s.Name) == 0 { @@ -2859,7 +3195,7 @@ func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList } // ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. -func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList { +func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if securityContext != nil { @@ -2884,7 +3220,7 @@ func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *a return allErrs } -func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { +func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { allErrs = field.ErrorList{} if len(newContainers) != len(oldContainers) { //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff @@ -2907,7 +3243,7 @@ func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldP // ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. -func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { +func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) @@ -2952,14 +3288,14 @@ func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { // handle updateable fields by munging those fields prior to deep equal comparison. 
mungedPod := *newPod // munge spec.containers[*].image - var newContainers []api.Container + var newContainers []core.Container for ix, container := range mungedPod.Spec.Containers { container.Image = oldPod.Spec.Containers[ix].Image newContainers = append(newContainers, container) } mungedPod.Spec.Containers = newContainers // munge spec.initContainers[*].image - var newInitContainers []api.Container + var newInitContainers []core.Container for ix, container := range mungedPod.Spec.InitContainers { container.Image = oldPod.Spec.InitContainers[ix].Image newInitContainers = append(newInitContainers, container) @@ -2988,7 +3324,7 @@ func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { // ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. -func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { +func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) @@ -3004,7 +3340,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { } // ValidatePodBinding tests if required fields in the pod binding are legal. -func ValidatePodBinding(binding *api.Binding) field.ErrorList { +func ValidatePodBinding(binding *core.Binding) field.ErrorList { allErrs := field.ErrorList{} if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { @@ -3020,7 +3356,7 @@ func ValidatePodBinding(binding *api.Binding) field.ErrorList { } // ValidatePodTemplate tests if required fields in the pod template are set. -func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { +func ValidatePodTemplate(pod *core.PodTemplate) field.ErrorList { allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) return allErrs @@ -3028,27 +3364,27 @@ func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { // ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. -func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList { +func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) return allErrs } -var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) -var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), - string(api.ServiceTypeLoadBalancer), string(api.ServiceTypeExternalName)) +var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityClientIP), string(core.ServiceAffinityNone)) +var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort), + string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName)) // ValidateService tests if required fields/annotations of a Service are valid. 
-func ValidateService(service *api.Service) field.ErrorList { +func ValidateService(service *core.Service) field.ErrorList { allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) specPath := field.NewPath("spec") - isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone - if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != api.ServiceTypeExternalName { + isHeadlessService := service.Spec.ClusterIP == core.ClusterIPNone + if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != core.ServiceTypeExternalName { allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) } switch service.Spec.Type { - case api.ServiceTypeLoadBalancer: + case core.ServiceTypeLoadBalancer: for ix := range service.Spec.Ports { port := &service.Spec.Ports[ix] // This is a workaround for broken cloud environments that @@ -3062,11 +3398,11 @@ func ValidateService(service *api.Service) field.ErrorList { if service.Spec.ClusterIP == "None" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services")) } - case api.ServiceTypeNodePort: + case core.ServiceTypeNodePort: if service.Spec.ClusterIP == "None" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for NodePort services")) } - case api.ServiceTypeExternalName: + case core.ServiceTypeExternalName: if service.Spec.ClusterIP != "" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services")) } @@ -3094,11 +3430,11 @@ func ValidateService(service *api.Service) field.ErrorList { allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) } - if service.Spec.SessionAffinity == api.ServiceAffinityClientIP { + if service.Spec.SessionAffinity == core.ServiceAffinityClientIP { allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...) 
- } else if service.Spec.SessionAffinity == api.ServiceAffinityNone { + } else if service.Spec.SessionAffinity == core.ServiceAffinityNone { if service.Spec.SessionAffinityConfig != nil { - allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(api.ServiceAffinityNone)))) + allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(core.ServiceAffinityNone)))) } } @@ -3126,7 +3462,7 @@ func ValidateService(service *api.Service) field.ErrorList { allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) } - if service.Spec.Type == api.ServiceTypeLoadBalancer { + if service.Spec.Type == core.ServiceTypeLoadBalancer { portsPath := specPath.Child("ports") includeProtocols := sets.NewString() for i := range service.Spec.Ports { @@ -3142,7 +3478,7 @@ func ValidateService(service *api.Service) field.ErrorList { } } - if service.Spec.Type == api.ServiceTypeClusterIP { + if service.Spec.Type == core.ServiceTypeClusterIP { portsPath := specPath.Child("ports") for i := range service.Spec.Ports { portPath := portsPath.Index(i) @@ -3154,14 +3490,14 @@ func ValidateService(service *api.Service) field.ErrorList { // Check for duplicate NodePorts, considering (protocol,port) pairs portsPath = specPath.Child("ports") - nodePorts := make(map[api.ServicePort]bool) + nodePorts := make(map[core.ServicePort]bool) for i := range service.Spec.Ports { port := &service.Spec.Ports[i] if port.NodePort == 0 { continue } portPath := portsPath.Index(i) - var key api.ServicePort + var key core.ServicePort key.Protocol = port.Protocol key.NodePort = port.NodePort _, found := nodePorts[key] @@ -3173,10 +3509,10 @@ func ValidateService(service *api.Service) field.ErrorList { // Check for duplicate Ports, considering (protocol,port) pairs portsPath = specPath.Child("ports") - ports := make(map[api.ServicePort]bool) + ports := make(map[core.ServicePort]bool) for i, port := range service.Spec.Ports { portPath := portsPath.Index(i) - key := api.ServicePort{Protocol: port.Protocol, Port: port.Port} + key := core.ServicePort{Protocol: port.Protocol, Port: port.Port} _, found := ports[key] if found { allErrs = append(allErrs, field.Duplicate(portPath, key)) @@ -3184,24 +3520,8 @@ func ValidateService(service *api.Service) field.ErrorList { ports[key] = true } - // Check for duplicate TargetPort - portsPath = specPath.Child("ports") - targetPorts := make(map[api.ServicePort]bool) - for i, port := range service.Spec.Ports { - if (port.TargetPort.Type == intstr.Int && port.TargetPort.IntVal == 0) || (port.TargetPort.Type == intstr.String && port.TargetPort.StrVal == "") { - continue - } - portPath := portsPath.Index(i) - key := api.ServicePort{Protocol: port.Protocol, TargetPort: port.TargetPort} - _, found := targetPorts[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath.Child("targetPort"), port.TargetPort)) - } - targetPorts[key] = true - } - // Validate SourceRange field and annotation - _, ok := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] + _, ok := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { var fieldPath *field.Path var val string @@ -3209,10 +3529,10 @@ func ValidateService(service *api.Service) field.ErrorList { fieldPath = specPath.Child("LoadBalancerSourceRanges") val 
= fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) } else { - fieldPath = field.NewPath("metadata", "annotations").Key(api.AnnotationLoadBalancerSourceRangesKey) - val = service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] + fieldPath = field.NewPath("metadata", "annotations").Key(core.AnnotationLoadBalancerSourceRangesKey) + val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] } - if service.Spec.Type != api.ServiceTypeLoadBalancer { + if service.Spec.Type != core.ServiceTypeLoadBalancer { allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) } _, err := apiservice.GetLoadBalancerSourceRanges(service) @@ -3226,7 +3546,7 @@ func ValidateService(service *api.Service) field.ErrorList { return allErrs } -func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { +func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if requireName && len(sp.Name) == 0 { @@ -3266,15 +3586,15 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo // validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations // have legal value. -func validateServiceExternalTrafficFieldsValue(service *api.Service) field.ErrorList { +func validateServiceExternalTrafficFieldsValue(service *core.Service) field.ErrorList { allErrs := field.ErrorList{} // Check first class fields. if service.Spec.ExternalTrafficPolicy != "" && - service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeCluster && - service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeLocal { + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeCluster && + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeLocal { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, - fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", api.ServiceExternalTrafficPolicyTypeCluster, api.ServiceExternalTrafficPolicyTypeLocal))) + fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal))) } if service.Spec.HealthCheckNodePort < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, @@ -3288,11 +3608,11 @@ func validateServiceExternalTrafficFieldsValue(service *api.Service) field.Error // HealthCheckNodePort and Type combination are legal. For update, it should be called // after clearing externalTraffic related fields for the ease of transitioning between // different service types. 
-func ValidateServiceExternalTrafficFieldsCombination(service *api.Service) field.ErrorList { +func ValidateServiceExternalTrafficFieldsCombination(service *core.Service) field.ErrorList { allErrs := field.ErrorList{} - if service.Spec.Type != api.ServiceTypeLoadBalancer && - service.Spec.Type != api.ServiceTypeNodePort && + if service.Spec.Type != core.ServiceTypeLoadBalancer && + service.Spec.Type != core.ServiceTypeNodePort && service.Spec.ExternalTrafficPolicy != "" { allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, "ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service")) @@ -3308,13 +3628,13 @@ func ValidateServiceExternalTrafficFieldsCombination(service *api.Service) field } // ValidateServiceUpdate tests if required fields in the service are set during an update -func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { +func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) // ClusterIP should be immutable for services using it (every type other than ExternalName) // which do not have ClusterIP assigned yet (empty string value) - if service.Spec.Type != api.ServiceTypeExternalName { - if oldService.Spec.Type != api.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { + if service.Spec.Type != core.ServiceTypeExternalName { + if oldService.Spec.Type != core.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) } } @@ -3324,34 +3644,34 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { } // ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. -func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList { +func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) return allErrs } // ValidateReplicationController tests if required fields in the replication controller are set. -func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList { +func ValidateReplicationController(controller *core.ReplicationController) field.ErrorList { allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) return allErrs } // ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList { +func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) 
return allErrs } // ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList { +func ValidateReplicationControllerStatusUpdate(controller, oldController *core.ReplicationController) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...) return allErrs } -func ValidateReplicationControllerStatus(status api.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { +func ValidateReplicationControllerStatus(status core.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...) allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) @@ -3385,7 +3705,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path } // Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) @@ -3403,8 +3723,8 @@ func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) } // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + if template.Spec.RestartPolicy != core.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)})) } if template.Spec.ActiveDeadlineSeconds != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) @@ -3414,7 +3734,7 @@ func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map } // ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. -func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { +func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) 
@@ -3424,7 +3744,7 @@ func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldP } // ValidatePodTemplateSpec validates the spec of a pod template -func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) @@ -3433,7 +3753,7 @@ func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) fie return allErrs } -func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList { +func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i := range volumes { vol := &volumes[i] @@ -3454,22 +3774,22 @@ func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *fie taints, err := helper.GetTaintsFromNodeAnnotations(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath, core.TaintsAnnotationKey, err.Error())) return allErrs } if len(taints) > 0 { - allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...) + allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(core.TaintsAnnotationKey))...) } return allErrs } // validateNodeTaints tests if given taints have valid data. -func validateNodeTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList { +func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} - uniqueTaints := map[api.TaintEffect]sets.String{} + uniqueTaints := map[core.TaintEffect]sets.String{} for i, currTaint := range taints { idxPath := fldPath.Index(i) @@ -3502,18 +3822,18 @@ func validateNodeTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if annotations[api.TaintsAnnotationKey] != "" { + if annotations[core.TaintsAnnotationKey] != "" { allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...) } - if annotations[api.PreferAvoidPodsAnnotationKey] != "" { + if annotations[core.PreferAvoidPodsAnnotationKey] != "" { allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...) } return allErrs } // ValidateNode tests if required fields in the node are set. -func ValidateNode(node *api.Node) field.ErrorList { +func ValidateNode(node *core.Node) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) @@ -3536,12 +3856,17 @@ func ValidateNode(node *api.Node) field.ErrorList { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)")) } - // TODO(rjnagal): Ignore PodCIDR till its completely implemented. 
+ if len(node.Spec.PodCIDR) != 0 { + _, err := ValidateCIDR(node.Spec.PodCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "podCIDR"), node.Spec.PodCIDR, "not a valid CIDR")) + } + } return allErrs } // ValidateNodeResources is used to make sure a node has valid capacity and allocatable values. -func ValidateNodeResources(node *api.Node) field.ErrorList { +func ValidateNodeResources(node *core.Node) field.ErrorList { allErrs := field.ErrorList{} // Validate resource quantities in capacity. hugePageSizes := sets.NewString() @@ -3573,21 +3898,21 @@ func ValidateNodeResources(node *api.Node) field.ErrorList { } // ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. -func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { +func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - // TODO: Enable the code once we have better api object.status update model. Currently, + // TODO: Enable the code once we have better core object.status update model. Currently, // anyone can update node status. - // if !apiequality.Semantic.DeepEqual(node.Status, api.NodeStatus{}) { + // if !apiequality.Semantic.DeepEqual(node.Status, core.NodeStatus{}) { // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) // } allErrs = append(allErrs, ValidateNodeResources(node)...) // Validate no duplicate addresses in node status. - addresses := make(map[api.NodeAddress]bool) + addresses := make(map[core.NodeAddress]bool) for i, address := range node.Status.Addresses { if _, ok := addresses[address]; ok { allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) @@ -3647,12 +3972,6 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { // Validate compute resource typename. // Refer to docs/design/resources.md for more details. func validateResourceName(value string, fldPath *field.Path) field.ErrorList { - // Opaque integer resources (OIR) deprecation began in v1.8 - // TODO: Remove warning after OIR deprecation cycle. - if helper.IsOpaqueIntResourceName(api.ResourceName(value)) { - glog.Errorf("DEPRECATION WARNING! 
Opaque integer resources are deprecated starting with v1.8: %s", value) - } - allErrs := field.ErrorList{} for _, msg := range validation.IsQualifiedName(value) { allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) @@ -3685,8 +4004,8 @@ func validateContainerResourceName(value string, fldPath *field.Path) field.Erro // isLocalStorageResource checks whether the resource is local ephemeral storage func isLocalStorageResource(name string) bool { - if name == string(api.ResourceEphemeralStorage) || name == string(api.ResourceRequestsEphemeralStorage) || - name == string(api.ResourceLimitsEphemeralStorage) { + if name == string(core.ResourceEphemeralStorage) || name == string(core.ResourceRequestsEphemeralStorage) || + name == string(core.ResourceLimitsEphemeralStorage) { return true } else { return false @@ -3729,13 +4048,13 @@ func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorLi // Validate limit range resource name // limit types (other than Pod/Container) could contain storage not just cpu or memory -func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList { +func validateLimitRangeResourceName(limitType core.LimitType, value string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if value == string(api.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + if value == string(core.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for Resource LimitRange")) } switch limitType { - case api.LimitTypePod, api.LimitTypeContainer: + case core.LimitTypePod, core.LimitTypeContainer: return validateContainerResourceName(value, fldPath) default: return validateResourceName(value, fldPath) @@ -3743,11 +4062,11 @@ func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPa } // ValidateLimitRange tests if required fields in the LimitRange are set. 
-func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { +func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) // ensure resource names are properly qualified per docs/design/resources.md - limitTypeSet := map[api.LimitType]bool{} + limitTypeSet := map[core.LimitType]bool{} fldPath := field.NewPath("spec", "limits") for i := range limitRange.Spec.Limits { idxPath := fldPath.Index(i) @@ -3778,7 +4097,7 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { min[string(k)] = q } - if limit.Type == api.LimitTypePod { + if limit.Type == core.LimitTypePod { if len(limit.Default) > 0 { allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) } @@ -3798,9 +4117,9 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { } } - if limit.Type == api.LimitTypePersistentVolumeClaim { - _, minQuantityFound := limit.Min[api.ResourceStorage] - _, maxQuantityFound := limit.Max[api.ResourceStorage] + if limit.Type == core.LimitTypePersistentVolumeClaim { + _, minQuantityFound := limit.Min[core.ResourceStorage] + _, maxQuantityFound := limit.Max[core.ResourceStorage] if !minQuantityFound && !maxQuantityFound { allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided")) } @@ -3859,6 +4178,11 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) } } + + // for GPU and hugepages, the default value and defaultRequest value must match if both are specified + if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k))) + } } } @@ -3866,20 +4190,20 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { } // ValidateServiceAccount tests if required fields in the ServiceAccount are set. -func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList { +func ValidateServiceAccount(serviceAccount *core.ServiceAccount) field.ErrorList { allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata")) return allErrs } // ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set. -func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList { +func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *core.ServiceAccount) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...) return allErrs } // ValidateSecret tests if required fields in the Secret are set. 
-func ValidateSecret(secret *api.Secret) field.ErrorList { +func ValidateSecret(secret *core.Secret) field.ErrorList { allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata")) dataPath := field.NewPath("data") @@ -3890,63 +4214,63 @@ func ValidateSecret(secret *api.Secret) field.ErrorList { } totalSize += len(value) } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize)) + if totalSize > core.MaxSecretSize { + allErrs = append(allErrs, field.TooLong(dataPath, "", core.MaxSecretSize)) } switch secret.Type { - case api.SecretTypeServiceAccountToken: + case core.SecretTypeServiceAccountToken: // Only require Annotations[kubernetes.io/service-account.name] // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop - if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), "")) + if value := secret.Annotations[core.ServiceAccountNameKey]; len(value) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), "")) } - case api.SecretTypeOpaque, "": + case core.SecretTypeOpaque, "": // no-op - case api.SecretTypeDockercfg: - dockercfgBytes, exists := secret.Data[api.DockerConfigKey] + case core.SecretTypeDockercfg: + dockercfgBytes, exists := secret.Data[core.DockerConfigKey] if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), "")) + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigKey), "")) break } // make sure that the content is well-formed json. if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "", err.Error())) + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "", err.Error())) } - case api.SecretTypeDockerConfigJson: - dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey] + case core.SecretTypeDockerConfigJson: + dockerConfigJsonBytes, exists := secret.Data[core.DockerConfigJsonKey] if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), "")) + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJsonKey), "")) break } // make sure that the content is well-formed json. 
if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "", err.Error())) + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJsonKey), "", err.Error())) } - case api.SecretTypeBasicAuth: - _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey] - _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey] + case core.SecretTypeBasicAuth: + _, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey] + _, passwordFieldExists := secret.Data[core.BasicAuthPasswordKey] // username or password might be empty, but the field must be present if !usernameFieldExists && !passwordFieldExists { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthUsernameKey), "")) - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthPasswordKey), "")) + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthUsernameKey), "")) + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthPasswordKey), "")) break } - case api.SecretTypeSSHAuth: - if len(secret.Data[api.SSHAuthPrivateKey]) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.SSHAuthPrivateKey), "")) + case core.SecretTypeSSHAuth: + if len(secret.Data[core.SSHAuthPrivateKey]) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.SSHAuthPrivateKey), "")) break } - case api.SecretTypeTLS: - if _, exists := secret.Data[api.TLSCertKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), "")) + case core.SecretTypeTLS: + if _, exists := secret.Data[core.TLSCertKey]; !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSCertKey), "")) } - if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), "")) + if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), "")) } // TODO: Verify that the key matches the cert. default: @@ -3957,7 +4281,7 @@ func ValidateSecret(secret *api.Secret) field.ErrorList { } // ValidateSecretUpdate tests if required fields in the Secret are set. -func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { +func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) if len(newSecret.Type) == 0 { @@ -3976,7 +4300,7 @@ func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { var ValidateConfigMapName = NameIsDNSSubdomain // ValidateConfigMap tests whether required fields in the ConfigMap are set. -func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { +func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...) 
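Aside on the ValidateSecret type switch above: each built-in secret type requires specific data keys, and the dockercfg payload must additionally parse as JSON. A hedged, standard-library-only sketch of that shape of check; the ".dockercfg" key name is assumed from upstream convention and the helper itself is illustrative, not the real validator:

package main

import (
	"encoding/json"
	"fmt"
)

// checkDockercfgSecret mimics the dockercfg branch: the key must be present
// and its value must be well-formed JSON. Illustrative only.
func checkDockercfgSecret(data map[string][]byte) []string {
	var errs []string
	raw, ok := data[".dockercfg"]
	if !ok {
		errs = append(errs, `data[".dockercfg"]: required`)
		return errs
	}
	if err := json.Unmarshal(raw, &map[string]interface{}{}); err != nil {
		errs = append(errs, fmt.Sprintf(`data[".dockercfg"]: invalid JSON: %v`, err))
	}
	return errs
}

func main() {
	fmt.Println(checkDockercfgSecret(map[string][]byte{".dockercfg": []byte(`{"auths":{}}`)})) // []
	fmt.Println(checkDockercfgSecret(map[string][]byte{".dockercfg": []byte(`not json`)}))     // one error
}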
@@ -3988,15 +4312,15 @@ func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { } totalSize += len(value) } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize)) + if totalSize > core.MaxSecretSize { + allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", core.MaxSecretSize)) } return allErrs } // ValidateConfigMapUpdate tests if required fields in the ConfigMap are set. -func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList { +func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) allErrs = append(allErrs, ValidateConfigMap(newCfg)...) @@ -4012,7 +4336,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel } // Validates resource requirement spec. -func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList { +func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} limPath := fldPath.Child("limits") reqPath := fldPath.Child("requests") @@ -4024,7 +4348,7 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat // Validate resource quantity. allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - if resourceName == api.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements")) } if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { @@ -4044,12 +4368,12 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat if exists { // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU))) + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) } else if quantity.Cmp(limitQuantity) > 0 { allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) } - } else if resourceName == api.ResourceNvidiaGPU { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", api.ResourceNvidiaGPU))) + } else if resourceName == core.ResourceNvidiaGPU { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU))) } } @@ -4057,7 +4381,7 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat } // validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes -func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { +func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(resourceQuotaSpec.Scopes) == 0 { return allErrs @@ -4080,8 +4404,8 @@ func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld * scopeSet.Insert(string(scope)) } invalidScopePairs := []sets.String{ - sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)), + sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)), + sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)), } for _, invalidScopePair := range invalidScopePairs { if scopeSet.HasAll(invalidScopePair.List()...) { @@ -4092,7 +4416,7 @@ func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld * } // ValidateResourceQuota tests if required fields in the ResourceQuota are set. -func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { +func ValidateResourceQuota(resourceQuota *core.ResourceQuota) field.ErrorList { allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...) 
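To make the requests/limits rule in the hunk above concrete: for resources that may not be overcommitted (such as GPUs and hugepages) the request must equal the limit, while ordinary resources only need request <= limit. A small sketch with plain integers standing in for resource.Quantity, purely illustrative:

package main

import "fmt"

// validateRequestVsLimit mirrors the comparison logic: non-overcommittable
// resources must have request == limit; everything else needs request <= limit.
func validateRequestVsLimit(name string, request, limit int64, overcommitAllowed bool) error {
	if !overcommitAllowed && request != limit {
		return fmt.Errorf("%s: request %d must be equal to limit %d", name, request, limit)
	}
	if request > limit {
		return fmt.Errorf("%s: request %d must be less than or equal to limit %d", name, request, limit)
	}
	return nil
}

func main() {
	fmt.Println(validateRequestVsLimit("cpu", 500, 1000, true))        // <nil>
	fmt.Println(validateRequestVsLimit("example.com/gpu", 1, 2, false)) // must be equal
	fmt.Println(validateRequestVsLimit("memory", 2048, 1024, true))    // must be <=
}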
@@ -4101,7 +4425,7 @@ func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { return allErrs } -func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList { +func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} fldPath := fld.Child("hard") @@ -4120,7 +4444,7 @@ func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Pat return allErrs } -func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { +func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} fldPath := fld.Child("hard") @@ -4148,7 +4472,7 @@ func ValidateResourceQuantityValue(resource string, value resource.Quantity, fld // ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. // newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { +func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) @@ -4172,7 +4496,7 @@ func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.Resourc // ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. // newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { +func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) if len(newResourceQuota.ResourceVersion) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) @@ -4194,7 +4518,7 @@ func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.R } // ValidateNamespace tests if required fields are set. -func ValidateNamespace(namespace *api.Namespace) field.ErrorList { +func ValidateNamespace(namespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) for i := range namespace.Spec.Finalizers { allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) @@ -4204,7 +4528,7 @@ func ValidateNamespace(namespace *api.Namespace) field.ErrorList { // Validate finalizer names func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateFinalizerName(stringValue, fldPath) + allErrs := apimachineryvalidation.ValidateFinalizerName(stringValue, fldPath) for _, err := range validateKubeFinalizerName(stringValue, fldPath) { allErrs = append(allErrs, err) } @@ -4226,7 +4550,7 @@ func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.Er // ValidateNamespaceUpdate tests to make sure a namespace update can be applied. 
// newNamespace is updated with fields that cannot be changed -func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList { +func ValidateNamespaceUpdate(newNamespace *core.Namespace, oldNamespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers newNamespace.Status = oldNamespace.Status @@ -4235,15 +4559,15 @@ func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Name // ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields // that cannot be changed. -func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { +func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) newNamespace.Spec = oldNamespace.Spec if newNamespace.DeletionTimestamp.IsZero() { - if newNamespace.Status.Phase != api.NamespaceActive { + if newNamespace.Status.Phase != core.NamespaceActive { allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) } } else { - if newNamespace.Status.Phase != api.NamespaceTerminating { + if newNamespace.Status.Phase != core.NamespaceTerminating { allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) } } @@ -4252,7 +4576,7 @@ func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) fi // ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. // newNamespace is updated with fields that cannot be changed. -func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { +func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) fldPath := field.NewPath("spec", "finalizers") @@ -4265,7 +4589,7 @@ func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) } // Construct lookup map of old subset IPs to NodeNames. 
-func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.EndpointAddress) { +func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []core.EndpointAddress) { for n := range addresses { if addresses[n].NodeName == nil { continue @@ -4275,7 +4599,7 @@ func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.E } // Build a map across all subsets of IP -> NodeName -func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]string { +func buildEndpointAddressNodeNameMap(subsets []core.EndpointSubset) map[string]string { ipToNodeName := make(map[string]string) for i := range subsets { updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) @@ -4284,7 +4608,7 @@ func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]st return ipToNodeName } -func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { +func validateEpAddrNodeNameTransition(addr *core.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { errList := field.ErrorList{} existingNodeName, found := ipToNodeName[addr.IP] if !found { @@ -4298,14 +4622,14 @@ func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName ma } // ValidateEndpoints tests if required fields are set. -func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList { +func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) - allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []api.EndpointSubset{}, field.NewPath("subsets"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []core.EndpointSubset{}, field.NewPath("subsets"))...) 
return allErrs } -func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList { +func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) for i := range subsets { @@ -4331,7 +4655,7 @@ func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.Endp return allErrs } -func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { +func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { allErrs := field.ErrorList{} for _, msg := range validation.IsValidIP(address.IP) { allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) @@ -4379,7 +4703,7 @@ func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList return allErrs } -func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { +func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if requireName && len(port.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -4398,7 +4722,7 @@ func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *fie } // ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. -func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList { +func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
@@ -4406,9 +4730,9 @@ func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.Er } // ValidateSecurityContext ensure the security context contains valid settings -func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList { +func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - //this should only be true for testing since SecurityContext is defaulted by the api + //this should only be true for testing since SecurityContext is defaulted by the core if sc == nil { return allErrs } @@ -4442,7 +4766,7 @@ func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field return allErrs } -func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { +func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { allErrs := field.ErrorList{} if opts.TailLines != nil && *opts.TailLines < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) @@ -4462,7 +4786,7 @@ func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { } // ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus -func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { +func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ingress := range status.Ingress { idxPath := fldPath.Child("ingress").Index(i) @@ -4483,7 +4807,7 @@ func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.P return allErrs } -func sysctlIntersection(a []api.Sysctl, b []api.Sysctl) []string { +func sysctlIntersection(a []core.Sysctl, b []core.Sysctl) []string { lookup := make(map[string]struct{}, len(a)) result := []string{} for i := range a { @@ -4503,7 +4827,7 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat na, err := helper.GetStorageNodeAffinityFromAnnotation(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.AlphaStorageNodeAffinityAnnotation, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath, core.AlphaStorageNodeAffinityAnnotation, err.Error())) return false, allErrs } if na == nil { @@ -4525,3 +4849,12 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat } return policySpecified, allErrs } + +// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR +func ValidateCIDR(cidr string) (*net.IPNet, error) { + _, net, err := net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + return net, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index 1b5c7962..186760a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -18,750 +18,15 @@ limitations under the License. // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
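Returning to the ValidateCIDR helper added at the end of validation.go above: it is a thin wrapper over net.ParseCIDR, so node.Spec.PodCIDR is accepted exactly when the standard library can parse it. A minimal usage sketch, standard library only:

package main

import (
	"fmt"
	"net"
)

func main() {
	for _, cidr := range []string{"10.244.0.0/16", "10.244.0.0", "not-a-cidr"} {
		if _, ipnet, err := net.ParseCIDR(cidr); err != nil {
			// This is what surfaces as the "not a valid CIDR" error on spec.podCIDR in ValidateNode.
			fmt.Printf("%q: invalid: %v\n", cidr, err)
		} else {
			fmt.Printf("%q: valid, network %s\n", cidr, ipnet)
		}
	}
}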
-package api +package core import ( resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil - }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDPersistentVolumeSource).DeepCopyInto(out.(*RBDPersistentVolumeSource)) - return nil - 
}, InType: reflect.TypeOf(&RBDPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOPersistentVolumeSource).DeepCopyInto(out.(*ScaleIOPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: 
reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { *out = *in @@ -981,6 +246,22 @@ func (in *Binding) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -1425,6 +706,11 @@ func (in *Container) DeepCopyInto(out *Container) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe if *in == nil { @@ -2097,6 +1383,25 @@ func (in *Event) DeepCopyInto(out *Event) { out.Source = in.Source in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } return } @@ -2153,6 +1458,23 @@ func (in *EventList) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventSource) DeepCopyInto(out *EventSource) { *out = *in @@ -2448,6 +1770,45 @@ func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { *out = *in @@ -3604,6 +2965,15 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec **out = **in } } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -3773,7 +3143,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(ISCSIVolumeSource) + *out = new(ISCSIPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3894,6 +3264,15 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { (*in).DeepCopyInto(*out) } } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + **out = **in + } + } return } @@ -3937,6 +3316,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -4145,6 +3533,64 @@ func (in *PodCondition) DeepCopy() *PodCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { *out = *in @@ -4515,6 +3961,15 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { **out = **in } } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } return } @@ -6030,6 +5485,22 @@ func (in *Volume) DeepCopy() *Volume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go index fbce8ee7..d97cffdb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package extensions // import "k8s.io/kubernetes/pkg/apis/extensions" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 7db398c6..48137fc6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -19,6 +19,7 @@ package extensions import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -51,19 +52,15 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentList{}, &DeploymentRollback{}, &ReplicationControllerDummy{}, - &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, &PodSecurityPolicy{}, &PodSecurityPolicyList{}, + &autoscaling.Scale{}, &networking.NetworkPolicy{}, &networking.NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index de4bbe37..e3697284 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( @@ -42,44 +42,6 @@ const ( SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" ) -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 - - // label query over pods that should match the replicas count. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Dummy definition @@ -111,63 +73,8 @@ type CustomMetricCurrentStatusList struct { } // +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. 
-type ThirdPartyResource struct { - metav1.TypeMeta - - // Standard object metadata - // +optional - metav1.ObjectMeta - - // Description is the description of this object. - // +optional - Description string - - // Versions are versions for this third party object - Versions []APIVersion -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ThirdPartyResourceList struct { - metav1.TypeMeta - - // Standard list metadata. - // +optional - metav1.ListMeta - - // Items is the list of horizontal pod autoscalers. - Items []ThirdPartyResource -} - -// An APIVersion represents a single concrete version of an object model. -// TODO: we should consider merge this struct with GroupVersion in metav1.go -type APIVersion struct { - // Name of this version (e.g. 'v1'). - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta - // Standard object metadata. - // +optional - metav1.ObjectMeta - - // Data is the raw JSON data for this data. - // +optional - Data []byte -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Deployment struct { @@ -524,6 +431,27 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 + + // Represents the latest available observations of a DaemonSet's current state. + Conditions []DaemonSetCondition +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. 
+ Message string } // +genclient @@ -573,18 +501,6 @@ type DaemonSetList struct { Items []DaemonSet } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ThirdPartyResourceDataList struct { - metav1.TypeMeta - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - // Items is a list of third party objects - Items []ThirdPartyResourceData -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -755,8 +671,8 @@ type IngressBackend struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -888,7 +804,8 @@ type PodSecurityPolicySpec struct { Privileged bool // DefaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capability in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []api.Capability // RequiredDropCapabilities are the capabilities that will be dropped from the container. These @@ -943,6 +860,11 @@ type PodSecurityPolicySpec struct { // AllowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional AllowedHostPaths []AllowedHostPath + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -962,9 +884,9 @@ type AllowedHostPath struct { // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { // Min is the start of the range, inclusive. - Min int + Min int32 // Max is the end of the range, inclusive. - Max int + Max int32 } // AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities @@ -1002,9 +924,16 @@ var ( Projected FSType = "projected" PortworxVolume FSType = "portworxVolume" ScaleIO FSType = "scaleIO" + CSI FSType = "csi" All FSType = "*" ) +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver string +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. type SELinuxStrategyOptions struct { // Rule is the strategy that will dictate the allowable labels that may be set. 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index d41048a5..ea801b3e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -22,253 +22,22 @@ package extensions import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersion).DeepCopyInto(out.(*APIVersion)) - return nil - }, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath)) - return nil - }, InType: reflect.TypeOf(&AllowedHostPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - 
return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupIDRange).DeepCopyInto(out.(*GroupIDRange)) - return nil - }, InType: reflect.TypeOf(&GroupIDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange)) - return nil - }, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Ingress).DeepCopyInto(out.(*Ingress)) - return nil - }, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend)) - return nil - }, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressList).DeepCopyInto(out.(*IngressList)) - return nil - }, InType: reflect.TypeOf(&IngressList{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRule).DeepCopyInto(out.(*IngressRule)) - return nil - }, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec)) - return nil - }, InType: reflect.TypeOf(&IngressSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus)) - return nil - }, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS)) - return nil - }, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserIDRange).DeepCopyInto(out.(*UserIDRange)) - return nil - }, InType: reflect.TypeOf(&UserIDRange{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIVersion) DeepCopyInto(out *APIVersion) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion. 
-func (in *APIVersion) DeepCopy() *APIVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(APIVersion) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } @@ -398,6 +167,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -480,6 +266,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1101,17 +894,17 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = *in if in.DefaultAddCapabilities != nil { in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.RequiredDropCapabilities != nil { in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.AllowedCapabilities != nil { in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.Volumes != nil { @@ -1142,6 +935,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedHostPath, len(*in)) copy(*out, *in) } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } return } @@ -1390,7 +1188,7 @@ func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { if *in == nil { *out = nil } else { - *out = new(api.SELinuxOptions) + *out = new(core.SELinuxOptions) **out = **in } } @@ -1407,76 +1205,6 @@ func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. -func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { *out = *in @@ -1498,138 +1226,6 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource. -func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource { - if in == nil { - return nil - } - out := new(ThirdPartyResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData. -func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData { - if in == nil { - return nil - } - out := new(ThirdPartyResourceData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList. -func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList. -func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UserIDRange) DeepCopyInto(out *UserIDRange) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go index b583051c..8b013e34 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=networking.k8s.io package networking // import "k8s.io/kubernetes/pkg/apis/networking" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go index 2109c374..ae37fcd5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go @@ -19,7 +19,7 @@ package networking import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go index da38fb5e..aa5958ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go @@ -22,58 +22,11 @@ package networking import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, 
InType: reflect.TypeOf(&NetworkPolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -268,7 +221,7 @@ func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { if *in == nil { *out = nil } else { - *out = new(api.Protocol) + *out = new(core.Protocol) **out = **in } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md index 868a37ee..b59eba40 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md @@ -4,7 +4,7 @@ The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development. #### Guidance for potential cloud providers: -* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). +* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). * In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information). * Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers. * It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved. @@ -13,4 +13,4 @@ The mechanism for supporting cloud providers is currently in transition: the or #### Some additional context on status / direction: * 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow. * Setting cloud-provider=external allows for creation of a separate controller-manager binary -* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. +* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go index 00479e9c..2e46d47d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/informers" "k8s.io/kubernetes/pkg/controller" ) @@ -49,6 +50,11 @@ type Interface interface { HasClusterID() bool } +type InformerUser interface { + // SetInformers sets the informer on the cloud object. 
+ SetInformers(informerFactory informers.SharedInformerFactory) +} + // Clusters is an abstract, pluggable interface for clusters of containers. type Clusters interface { // ListClusters lists the names of the available clusters. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go index cb8121f6..04171762 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -28,14 +28,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" - clientgoclientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/serviceaccount" "github.com/golang/glog" @@ -47,8 +46,8 @@ type ControllerClientBuilder interface { ConfigOrDie(name string) *restclient.Config Client(name string) (clientset.Interface, error) ClientOrDie(name string) clientset.Interface - ClientGoClient(name string) (clientgoclientset.Interface, error) - ClientGoClientOrDie(name string) clientgoclientset.Interface + ClientGoClient(name string) (clientset.Interface, error) + ClientGoClientOrDie(name string) clientset.Interface } // SimpleControllerClientBuilder returns a fixed client with different user agents @@ -86,15 +85,15 @@ func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interf return client } -func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) { +func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) { clientConfig, err := b.Config(name) if err != nil { return nil, err } - return clientgoclientset.NewForConfig(clientConfig) + return clientset.NewForConfig(clientConfig) } -func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface { +func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface { client, err := b.ClientGoClient(name) if err != nil { glog.Fatal(err) @@ -276,15 +275,15 @@ func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface return client } -func (b SAControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) { +func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) { clientConfig, err := b.Config(name) if err != nil { return nil, err } - return clientgoclientset.NewForConfig(clientConfig) + return clientset.NewForConfig(clientConfig) } -func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface { +func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface { client, err := b.ClientGoClient(name) if err != nil { glog.Fatal(err) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 3eb1aa84..b662fa6f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -42,9 +42,9 @@ import ( "k8s.io/client-go/tools/record" 
"k8s.io/client-go/util/integer" clientretry "k8s.io/client-go/util/retry" - _ "k8s.io/kubernetes/pkg/api/install" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/api/validation" + _ "k8s.io/kubernetes/pkg/apis/core/install" + "k8s.io/kubernetes/pkg/apis/core/validation" hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" @@ -410,7 +410,7 @@ type RealRSControl struct { var _ RSControlInterface = &RealRSControl{} func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { - _, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) return err } @@ -1008,7 +1008,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 197397c4..80599039 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -98,7 +98,7 @@ const ( // the API server as the certificate approaches expiration. RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate" - // owner: @msau + // owner: @msau42 // alpha: v1.7 // // A new volume type that supports local disks on a node. @@ -140,12 +140,6 @@ const ( // 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'. TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition" - // owner: @haibinxie - // alpha: v1.8 - // - // Implement IPVS-based in-cluster service load balancing - SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" - // owner: @jsafrane // alpha: v1.8 // @@ -169,6 +163,55 @@ const ( // // Enable nodes to exclude themselves from service load balancers ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" + + // owner: @jsafrane + // alpha: v1.9 + // + // Enable running mount utilities in containers. + MountContainers utilfeature.Feature = "MountContainers" + + // owner: @msau42 + // alpha: v1.9 + // + // Extend the default scheduler to be aware of PV topology and handle PV binding + // Before moving to beta, resolve Kubernetes issue #56180 + VolumeScheduling utilfeature.Feature = "VolumeScheduling" + + // owner: @vladimirvivien + // alpha: v1.9 + // + // Enable mount/attachment of Container Storage Interface (CSI) backed PVs + CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" + + // owner @MrHohn + // alpha: v1.9 + // + // Support configurable pod DNS parameters. + CustomPodDNS utilfeature.Feature = "CustomPodDNS" + + // owner: @screeley44 + // alpha: v1.9 + // + // Enable Block volume support in containers. 
+ BlockVolume utilfeature.Feature = "BlockVolume" + + // owner: @pospispa + // + // alpha: v1.9 + // Postpone deletion of a persistent volume claim in case it is used by a pod + PVCProtection utilfeature.Feature = "PVCProtection" + + // owner: @aveshagarwal + // alpha: v1.9 + // + // Enable resource limits priority function + ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction" + + // owner: @m1093782566 + // beta: v1.9 + // + // Implement IPVS-based in-cluster service load balancing + SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" ) func init() { @@ -201,6 +244,14 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, CPUManager: {Default: false, PreRelease: utilfeature.Alpha}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha}, + CSIPersistentVolume: {Default: false, PreRelease: utilfeature.Alpha}, + CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha}, + BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + PVCProtection: {Default: false, PreRelease: utilfeature.Alpha}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -212,6 +263,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: - apiextensionsfeatures.CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha}, - SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Alpha}, + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go new file mode 100644 index 00000000..400d001e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath supplies methods for extracting fields from objects +// given a path to a field. +package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath" diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go new file mode 100644 index 00000000..43754145 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -0,0 +1,103 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
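Stepping back to the feature gates introduced a little further up: components consult these gates through the shared `utilfeature.DefaultFeatureGate`, which `pkg/features` populates from `defaultKubernetesFeatureGates` in its `init()`. A minimal, illustrative sketch of that lookup (not part of this patch; real callers live in the kubelet, scheduler and controllers):

```
package main

import (
	"fmt"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

func main() {
	// Importing pkg/features registers the defaults above with the shared
	// gate, so an alpha gate such as BlockVolume reports false unless it
	// was switched on, e.g. via --feature-gates=BlockVolume=true.
	if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) {
		fmt.Println("block volume support is enabled")
	} else {
		fmt.Println("block volume support is disabled")
	}
}
```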
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/validation" +) + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + for key, value := range m { + fmtStr += fmt.Sprintf("%v=%q\n", key, value) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). 
+// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go index 27c42c5c..04cac264 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go @@ -25,3 +25,31 @@ const ( // NetworkReady means the runtime network is up and ready to accept containers which require network. NetworkReady = "NetworkReady" ) + +// LogStreamType is the type of the stream in CRI container log. +type LogStreamType string + +const ( + // Stdout is the stream type for stdout. + Stdout LogStreamType = "stdout" + // Stderr is the stream type for stderr. + Stderr LogStreamType = "stderr" +) + +// LogTag is the tag of a log line in CRI container log. +// Currently defined log tags: +// * First tag: Partial/End - P/E. +// The field in the container log format can be extended to include multiple +// tags by using a delimiter, but changes should be rare. If it becomes clear +// that better extensibility is desired, a more extensible format (e.g., json) +// should be adopted as a replacement and/or addition. +type LogTag string + +const ( + // LogTagPartial means the line is part of multiple lines. + LogTagPartial LogTag = "P" + // LogTagFull means the line is a single full line or the end of multiple lines. + LogTagFull LogTag = "F" + // LogTagDelimiter is the delimiter for different log tags. + LogTagDelimiter = ":" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go index df1b274f..32dbc745 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go @@ -46,8 +46,8 @@ type HandlerRunner interface { // RuntimeHelper wraps kubelet to make container runtime // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { - GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error) - GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error) + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, err error) + GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) // GetPodCgroupParent returns the CgroupName identifer, and its literal cgroupfs form on the host // of a pod. 
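To make the Partial/Full log tags defined in constants.go above concrete, the sketch below splits a CRI-format container log line (`<RFC3339Nano timestamp> <stream> <tag> <content>`) into its fields. The sample lines and the helper are illustrative only; the kubelet's own log parser is the authoritative implementation.

```
package main

import (
	"fmt"
	"strings"
)

// splitCRILogLine breaks a CRI container log line of the form
//   <RFC3339Nano timestamp> <stream> <tag> <content>
// into its four fields. The tag carries the Partial ("P") or Full ("F")
// marker described above; ":" is reserved as the delimiter should more
// tags ever be added.
func splitCRILogLine(line string) (timestamp, stream, tag, content string, err error) {
	parts := strings.SplitN(line, " ", 4)
	if len(parts) != 4 {
		return "", "", "", "", fmt.Errorf("unexpected CRI log line: %q", line)
	}
	return parts[0], parts[1], parts[2], parts[3], nil
}

func main() {
	// A long application line the runtime had to write in two pieces:
	for _, l := range []string{
		"2017-12-01T15:37:38.000000001Z stdout P first half of a long line, ",
		"2017-12-01T15:37:38.000000002Z stdout F and its ending",
	} {
		ts, stream, tag, content, err := splitCRILogLine(l)
		if err != nil {
			fmt.Println(err)
			continue
		}
		fmt.Printf("%s [%s/%s] %q\n", ts, stream, tag, content)
	}
}
```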
GetPodCgroupParent(pod *v1.Pod) string diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go index f545b41a..2f5f4e3d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -428,10 +428,6 @@ type RunContainerOptions struct { // this directory will be used to create and mount the log file to // container.TerminationMessagePath PodContainerDir string - // The list of DNS servers for the container to use. - DNS []string - // The list of DNS search domains. - DNSSearch []string // The parent cgroup to pass to Docker CgroupParent string // The type of container rootfs @@ -450,9 +446,14 @@ type RunContainerOptions struct { type VolumeInfo struct { // Mounter is the volume's mounter Mounter volume.Mounter + // BlockVolumeMapper is the Block volume's mapper + BlockVolumeMapper volume.BlockVolumeMapper // SELinuxLabeled indicates whether this volume has had the // pod's SELinux label applied to it or not SELinuxLabeled bool + // Whether the volume permission is set to read-only or not + // This value is passed from volume.spec + ReadOnly bool } type VolumeMap map[string]VolumeInfo diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go index 42f7c681..ab28a1d8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go @@ -22,6 +22,7 @@ import ( "net" "strings" + "k8s.io/apimachinery/pkg/util/sets" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -36,12 +37,18 @@ type fakeTable struct { } type fakeIPTables struct { - tables map[string]*fakeTable + tables map[string]*fakeTable + builtinChains map[string]sets.String } func NewFakeIPTables() *fakeIPTables { return &fakeIPTables{ tables: make(map[string]*fakeTable, 0), + builtinChains: map[string]sets.String{ + string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"), + string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"), + string(utiliptables.TableMangle): sets.NewString("PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"), + }, } } @@ -246,6 +253,7 @@ func (f *fakeIPTables) SaveInto(tableName utiliptables.Table, buffer *bytes.Buff } func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag) error { + allLines := string(data) buf := bytes.NewBuffer(data) var tableName utiliptables.Table for { @@ -274,6 +282,13 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, } } _, _ = f.ensureChain(tableName, chainName) + // The --noflush option for iptables-restore doesn't work for user-defined chains, only builtin chains. 
+ // We should flush user-defined chains if the chain is not to be deleted + if !f.isBuiltinChain(tableName, chainName) && !strings.Contains(allLines, "-X "+string(chainName)) { + if err := f.FlushChain(tableName, chainName); err != nil { + return err + } + } } else if strings.HasPrefix(line, "-A") { parts := strings.Split(line, " ") if len(parts) < 3 { @@ -329,3 +344,10 @@ func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) { func (f *fakeIPTables) Destroy() { } + +func (f *fakeIPTables) isBuiltinChain(tableName utiliptables.Table, chainName utiliptables.Chain) bool { + if builtinChains, ok := f.builtinChains[string(tableName)]; ok && builtinChains.Has(string(chainName)) { + return true + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go index ea53c9c8..a31b4da0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "strconv" "strings" "sync" @@ -177,6 +178,8 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er chainsToRemove := []utiliptables.Chain{} for _, pm := range hostportMappings { chainsToRemove = append(chainsToRemove, getHostportChain(id, pm)) + // TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 + chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm)) } // remove rules that consists of target chains @@ -247,6 +250,16 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error // WARNING: Please do not change this function. Otherwise, HostportManager may not be able to // identify existing iptables chains. func getHostportChain(id string, pm *PortMapping) utiliptables.Chain { + hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol))) + encoded := base32.StdEncoding.EncodeToString(hash[:]) + return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) +} + +// This bugy func does bad conversion on HostPort from int32 to string. +// It may generates same chain names for different ports of the same pod, e.g. port 57119/55429/56833. +// `getHostportChain` fixed this bug. In order to cleanup the legacy chains/rules, it is temporarily left. +// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 +func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain { hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol))) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go index 0086b745..3d7bfd6e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "strconv" "strings" "time" @@ -142,7 +143,7 @@ func writeLine(buf *bytes.Buffer, words ...string) { // this because IPTables Chain Names must be <= 28 chars long, and the longer // they are the harder they are to read. 
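The buggy helper retained above for cleanup differs from the fixed one only in how the host port is turned into text, and that is exactly how collisions arose: `string(pm.HostPort)` interprets the int32 as a Unicode code point rather than printing decimal digits, so ports in the surrogate range (the 57119/55429/56833 examples mentioned in the comment) all collapse to U+FFFD and feed identical bytes into the sha256 hash. A standalone sketch of the two conversions (illustrative, not part of the patch):

```
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// The ports cited in the upstream fix all fall in the UTF-16 surrogate
	// range, so the rune-style conversion yields "\ufffd" for each of them,
	// while strconv.Itoa keeps them distinct.
	for _, port := range []int32{57119, 55429, 56833} {
		buggy := string(port)            // "\ufffd" for every one of these ports
		fixed := strconv.Itoa(int(port)) // "57119", "55429", "56833"
		fmt.Printf("port %d: buggy=%q fixed=%q\n", port, buggy, fixed)
	}
}
```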
func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain { - hash := sha256.Sum256([]byte(string(pm.HostPort) + string(pm.Protocol) + podFullName)) + hash := sha256.Sum256([]byte(strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + podFullName)) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go index 5f872c82..919ed5a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/httpstream/spdy" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go index cb4bca04..b445c922 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/wsstream" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go index 9d488321..74f0595c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/util/wsstream" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go index 2b95b3b1..6bfc3dac 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeapi "k8s.io/kubernetes/pkg/api" + kubeapi "k8s.io/kubernetes/pkg/apis/core" ) const ( @@ -49,6 +49,8 @@ const ( // Pods with the given ids have unexpected status in this source, // kubelet should reconcile status with this source RECONCILE + // Pods with the given ids have been restored from a checkpoint. 
+ RESTORE // These constants identify the sources of pods // Updates from a file diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go index 7ea31d0a..7ee1da6f 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) var nodeHealthzRetryInterval = 60 * time.Second diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index 786434f7..1710d989 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -41,9 +41,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" apiservice "k8s.io/kubernetes/pkg/api/service" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -80,6 +80,9 @@ const ( // the mark-for-drop chain KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP" + + // the kubernetes forward chain + kubeForwardChain utiliptables.Chain = "KUBE-FORWARD" ) // IPTablesVersioner can query the current iptables version. @@ -440,11 +443,6 @@ func NewProxier(ipt utiliptables.Interface, recorder record.EventRecorder, healthzServer healthcheck.HealthzUpdater, ) (*Proxier, error) { - // check valid user input - if minSyncPeriod > syncPeriod { - return nil, fmt.Errorf("minSyncPeriod (%v) must be <= syncPeriod (%v)", minSyncPeriod, syncPeriod) - } - // Set the route_localnet sysctl we need for if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) @@ -458,9 +456,6 @@ func NewProxier(ipt utiliptables.Interface, } // Generate the masquerade mark to use for SNAT rules. - if masqueradeBit < 0 || masqueradeBit > 31 { - return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit) - } masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) @@ -543,6 +538,18 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { } } + // Unlink the forwarding chain. + args = []string{ + "-m", "comment", "--comment", "kubernetes forwarding rules", + "-j", string(kubeForwardChain), + } + if err := ipt.DeleteRule(utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil { + if !utiliptables.IsNotFoundError(err) { + glog.Errorf("Error removing pure-iptables proxy rule: %v", err) + encounteredError = true + } + } + // Flush and remove all of our chains. 
iptablesData := bytes.NewBuffer(nil) if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { @@ -578,14 +585,28 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { encounteredError = true } } - { - filterBuf := bytes.NewBuffer(nil) - writeLine(filterBuf, "*filter") - writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain)) - writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain)) - writeLine(filterBuf, "COMMIT") + + // Flush and remove all of our chains. + iptablesData = bytes.NewBuffer(nil) + if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { + glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) + encounteredError = true + } else { + existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes()) + filterChains := bytes.NewBuffer(nil) + filterRules := bytes.NewBuffer(nil) + writeLine(filterChains, "*filter") + for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeForwardChain} { + if _, found := existingFilterChains[chain]; found { + chainString := string(chain) + writeLine(filterChains, existingFilterChains[chain]) + writeLine(filterRules, "-X", chainString) + } + } + writeLine(filterRules, "COMMIT") + filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) // Write it. - if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { + if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) encounteredError = true } @@ -798,7 +819,7 @@ func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.S for svcPortName := range endpointsMap { for _, ep := range endpointsMap[svcPortName] { if ep.isLocal { - // If the endpoint has a bad format, ipPart() will log an + // If the endpoint has a bad format, utilproxy.IPPart() will log an // error and ep.IPPart() will return a null string. if ip := ep.IPPart(); ip != "" { nsn := svcPortName.NamespacedName @@ -1027,6 +1048,21 @@ func (proxier *Proxier) syncProxyRules() { } } + // Create and link the kube forward chain. + { + if _, err := proxier.iptables.EnsureChain(utiliptables.TableFilter, kubeForwardChain); err != nil { + glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, kubeForwardChain, err) + return + } + + comment := "kubernetes forward rules" + args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeForwardChain)} + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil { + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, err) + return + } + } + // // Below this point we will not return until we try to write the iptables rules. 
// @@ -1069,6 +1105,11 @@ func (proxier *Proxier) syncProxyRules() { } else { writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain)) } + if chain, ok := existingFilterChains[kubeForwardChain]; ok { + writeLine(proxier.filterChains, chain) + } else { + writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeForwardChain)) + } if chain, ok := existingNATChains[kubeServicesChain]; ok { writeLine(proxier.natChains, chain) } else { @@ -1516,6 +1557,18 @@ func (proxier *Proxier) syncProxyRules() { ) writeLine(proxier.natRules, args...) } else { + // First write session affinity rules only over local endpoints, if applicable. + if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { + for _, endpointChain := range localEndpointChains { + writeLine(proxier.natRules, + "-A", string(svcXlbChain), + "-m", "comment", "--comment", svcNameString, + "-m", "recent", "--name", string(endpointChain), + "--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeSeconds), "--reap", + "-j", string(endpointChain)) + } + } + // Setup probability filter rules only over local endpoints for i, endpointChain := range localEndpointChains { // Balancing rules in the per-service chain. @@ -1562,6 +1615,40 @@ func (proxier *Proxier) syncProxyRules() { "-m", "addrtype", "--dst-type", "LOCAL", "-j", string(kubeNodePortsChain)) + // If the masqueradeMark has been added then we want to forward that same + // traffic, this allows NodePort traffic to be forwarded even if the default + // FORWARD policy is not accept. + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "comment", "--comment", `"kubernetes forwarding rules"`, + "-m", "mark", "--mark", proxier.masqueradeMark, + "-j", "ACCEPT", + ) + + // The following rules can only be set if clusterCIDR has been defined. + if len(proxier.clusterCIDR) != 0 { + // The following two rules ensure the traffic after the initial packet + // accepted by the "kubernetes forwarding rules" rule above will be + // accepted, to be as specific as possible the traffic must be sourced + // or destined to the clusterCIDR (to/from a pod). + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-s", proxier.clusterCIDR, + "-m", "comment", "--comment", `"kubernetes forwarding conntrack pod source rule"`, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + ) + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "comment", "--comment", `"kubernetes forwarding conntrack pod destination rule"`, + "-d", proxier.clusterCIDR, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + ) + } + // Write the end-of-table markers. writeLine(proxier.filterRules, "COMMIT") writeLine(proxier.natRules, "COMMIT") @@ -1609,7 +1696,7 @@ func (proxier *Proxier) syncProxyRules() { // Finish housekeeping. // TODO: these could be made more consistent. 
- for _, svcIP := range staleServices.List() { + for _, svcIP := range staleServices.UnsortedList() { if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil { glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go index 7806c3a1..d99d61b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go @@ -27,7 +27,7 @@ import ( // Utilities for dealing with conntrack -const noConnectionToDelete = "0 flow entries have been deleted" +const NoConnectionToDelete = "0 flow entries have been deleted" func IsIPv6(netIP net.IP) bool { return netIP != nil && netIP.To4() == nil @@ -50,7 +50,7 @@ func parametersWithFamily(isIPv6 bool, parameters ...string) []string { func ClearUDPConntrackForIP(execer exec.Interface, ip string) error { parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp") err := ExecConntrackTool(execer, parameters...) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { // TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed. // These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it // is expensive to baby-sit all udp connections to kubernetes services. @@ -84,7 +84,7 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) erro } parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port)) err := ExecConntrackTool(execer, parameters...) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err) } return nil @@ -95,7 +95,7 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) erro func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error { parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp") err := ExecConntrackTool(execer, parameters...) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { // TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed. // These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it // is expensive to baby sit all udp connections to kubernetes services. 
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go index 32e770d4..449e1126 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go @@ -19,6 +19,7 @@ package util import ( "fmt" "net" + "strconv" "github.com/golang/glog" ) @@ -32,12 +33,33 @@ func IPPart(s string) string { return s } // Must be IP:port - ip, _, err := net.SplitHostPort(s) + host, _, err := net.SplitHostPort(s) if err != nil { glog.Errorf("Error parsing '%s': %v", s, err) return "" } - return ip + // Check if host string is a valid IP address + if ip := net.ParseIP(host); ip != nil { + return ip.String() + } else { + glog.Errorf("invalid IP part '%s'", host) + } + return "" +} + +func PortPart(s string) (int, error) { + // Must be IP:port + _, port, err := net.SplitHostPort(s) + if err != nil { + glog.Errorf("Error parsing '%s': %v", s, err) + return -1, err + } + portNumber, err := strconv.Atoi(port) + if err != nil { + glog.Errorf("Error parsing '%s': %v", port, err) + return -1, err + } + return portNumber, nil } // ToCIDR returns a host address of the form /32 for diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index 81734f54..cac0140c 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -20,8 +20,8 @@ import ( "net" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go index df4dc3a9..0503c151 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go @@ -20,7 +20,7 @@ import ( "k8s.io/api/core/v1" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // UserInfo returns a user.Info interface for the given namespace, service account name and UID diff --git a/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go b/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go index 40e6ab2f..292288d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go +++ b/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go @@ -82,6 +82,7 @@ type Table string const ( TableNAT Table = "nat" TableFilter Table = "filter" + TableMangle Table = "mangle" ) type Chain string @@ -91,6 +92,7 @@ const ( ChainPrerouting Chain = "PREROUTING" ChainOutput Chain = "OUTPUT" ChainInput Chain = "INPUT" + ChainForward Chain = "FORWARD" ) const ( @@ -592,7 +594,7 @@ func getIPTablesRestoreWaitFlag(exec utilexec.Interface, protocol Protocol) []st return nil } - return []string{"--wait=2"} + return []string{WaitSecondsString} } // getIPTablesRestoreVersionString runs "iptables-restore --version" to get the version string diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go new file mode 100644 index 00000000..1dedc5b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go @@ -0,0 +1,140 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "fmt" + + "github.com/golang/glog" +) + +// ExecMounter is a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. +type execMounter struct { + wrappedMounter Interface + exec Exec +} + +func NewExecMounter(exec Exec, wrapped Interface) Interface { + return &execMounter{ + wrappedMounter: wrapped, + exec: exec, + } +} + +// execMounter implements mount.Interface +var _ Interface = &execMounter{} + +// Mount runs mount(8) using given exec interface. +func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { + bind, bindRemountOpts := isBind(options) + + if bind { + err := m.doExecMount(source, target, fstype, []string{"bind"}) + if err != nil { + return err + } + return m.doExecMount(source, target, fstype, bindRemountOpts) + } + + return m.doExecMount(source, target, fstype, options) +} + +// doExecMount calls exec(mount ) using given exec interface. +func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { + glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) + mountArgs := makeMountArgs(source, target, fstype, options) + output, err := m.exec.Run("mount", mountArgs...) + glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) + if err != nil { + return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", + err, "mount", source, target, fstype, options, string(output)) + } + + return err +} + +// Unmount runs umount(8) using given exec interface. +func (m *execMounter) Unmount(target string) error { + outputBytes, err := m.exec.Run("umount", target) + if err == nil { + glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) + } else { + glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) + } + + return err +} + +// List returns a list of all mounted filesystems. +func (m *execMounter) List() ([]MountPoint, error) { + return m.wrappedMounter.List() +} + +// IsLikelyNotMountPoint determines whether a path is a mountpoint. +func (m *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { + return m.wrappedMounter.IsLikelyNotMountPoint(file) +} + +// DeviceOpened checks if block device in use by calling Open with O_EXCL flag. +// Returns true if open returns errno EBUSY, and false if errno is nil. +// Returns an error if errno is any error other than EBUSY. +// Returns with error if pathname is not a device. +func (m *execMounter) DeviceOpened(pathname string) (bool, error) { + return m.wrappedMounter.DeviceOpened(pathname) +} + +// PathIsDevice uses FileInfo returned from os.Stat to check if path refers +// to a device. 
+func (m *execMounter) PathIsDevice(pathname string) (bool, error) { + return m.wrappedMounter.PathIsDevice(pathname) +} + +//GetDeviceNameFromMount given a mount point, find the volume id from checking /proc/mounts +func (m *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return m.wrappedMounter.GetDeviceNameFromMount(mountPath, pluginDir) +} + +func (m *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return m.wrappedMounter.IsMountPointMatch(mp, dir) +} + +func (m *execMounter) IsNotMountPoint(dir string) (bool, error) { + return m.wrappedMounter.IsNotMountPoint(dir) +} + +func (m *execMounter) MakeRShared(path string) error { + return m.wrappedMounter.MakeRShared(path) +} + +func (m *execMounter) GetFileType(pathname string) (FileType, error) { + return m.wrappedMounter.GetFileType(pathname) +} + +func (m *execMounter) MakeFile(pathname string) error { + return m.wrappedMounter.MakeFile(pathname) +} + +func (m *execMounter) MakeDir(pathname string) error { + return m.wrappedMounter.MakeDir(pathname) +} + +func (m *execMounter) ExistsPath(pathname string) bool { + return m.wrappedMounter.ExistsPath(pathname) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go new file mode 100644 index 00000000..136704b2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go @@ -0,0 +1,87 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "errors" +) + +type execMounter struct{} + +// ExecMounter is a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. 
+func NewExecMounter(exec Exec, wrapped Interface) Interface { + return &execMounter{} +} + +func (mounter *execMounter) Mount(source string, target string, fstype string, options []string) error { + return nil +} + +func (mounter *execMounter) Unmount(target string) error { + return nil +} + +func (mounter *execMounter) List() ([]MountPoint, error) { + return []MountPoint{}, nil +} + +func (mounter *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mounter *execMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + +func (mounter *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { + return true, nil +} + +func (mounter *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return "", nil +} + +func (mounter *execMounter) DeviceOpened(pathname string) (bool, error) { + return false, nil +} + +func (mounter *execMounter) PathIsDevice(pathname string) (bool, error) { + return true, nil +} + +func (mounter *execMounter) MakeRShared(path string) error { + return nil +} + +func (mounter *execMounter) GetFileType(pathname string) (FileType, error) { + return FileType("fake"), errors.New("not implemented") +} + +func (mounter *execMounter) MakeDir(pathname string) error { + return nil +} + +func (mounter *execMounter) MakeFile(pathname string) error { + return nil +} + +func (mounter *execMounter) ExistsPath(pathname string) bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 9caec670..71064f32 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -498,7 +498,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if mountErr != nil { // Mount failed. This indicates either that the disk is unformatted or // it contains an unexpected filesystem. - existingFormat, err := mounter.getDiskFormat(source) + existingFormat, err := mounter.GetDiskFormat(source) if err != nil { return err } @@ -536,8 +536,8 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, return mountErr } -// getDiskFormat uses 'lsblk' to see if the given disk is unformated -func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) { +// GetDiskFormat uses 'lsblk' to see if the given disk is unformated +func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { args := []string{"-n", "-o", "FSTYPE", disk} glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) dataOut, err := mounter.Exec.Run("lsblk", args...) 
diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go index 244c22c0..87d1e374 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go @@ -86,7 +86,7 @@ func (mounter *Mounter) MakeRShared(path string) error { } func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { - return nil + return mounter.Interface.Mount(source, target, fstype, options) } func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go b/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go index 1a10939b..a970bf7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go +++ b/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go @@ -60,3 +60,9 @@ func Int32PtrDerefOr(ptr *int32, def int32) int32 { } return def } + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + o := b + return &o +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go index a86839b5..76e4bb86 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go +++ b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go @@ -25,8 +25,8 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 1ec55d23..42ceaa40 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -208,6 +208,26 @@ type ExpandableVolumePlugin interface { RequiresFSResize() bool } +// BlockVolumePlugin is an extend interface of VolumePlugin and is used for block volumes support. +type BlockVolumePlugin interface { + VolumePlugin + // NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. + // Ownership of the spec pointer in *not* transferred. + // - spec: The v1.Volume spec + // - pod: The enclosing pod + NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error) + // NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state. + // - name: The volume name, as per the v1.Volume spec. + // - podUID: The UID of the enclosing pod + NewBlockVolumeUnmapper(name string, podUID types.UID) (BlockVolumeUnmapper, error) + // ConstructBlockVolumeSpec constructs a volume spec based on the given + // podUID, volume name and a pod device map path. + // The spec may have incomplete information due to limited information + // from input. This function is used by volume manager to reconstruct + // volume spec by reading the volume directories from disk. + ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*Spec, error) +} + // VolumeHost is an interface that plugins can use to access the kubelet. type VolumeHost interface { // GetPluginDir returns the absolute path to a directory under which @@ -216,6 +236,11 @@ type VolumeHost interface { // GetPodPluginDir(). 
GetPluginDir(pluginName string) string + // GetVolumeDevicePluginDir returns the absolute path to a directory + // under which a given plugin may store data. + // ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/ + GetVolumeDevicePluginDir(pluginName string) string + // GetPodVolumeDir returns the absolute path a directory which // represents the named volume under the named plugin for the given // pod. If the specified pod does not exist, the result of this call @@ -228,6 +253,13 @@ type VolumeHost interface { // directory might not actually exist on disk yet. GetPodPluginDir(podUID types.UID, pluginName string) string + // GetPodVolumeDeviceDir returns the absolute path a directory which + // represents the named plugin for the given pod. + // If the specified pod does not exist, the result of this call + // might not exist. + // ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/ + GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string + // GetKubeClient returns a client interface GetKubeClient() clientset.Interface @@ -271,6 +303,9 @@ type VolumeHost interface { // Returns the labels on the node GetNodeLabels() (map[string]string, error) + + // Returns the name of the node + GetNodeName() types.NodeName } // VolumePluginMgr tracks registered plugins. @@ -675,6 +710,32 @@ func (pm *VolumePluginMgr) FindExpandablePluginByName(name string) (ExpandableVo return nil, nil } +// FindMapperPluginBySpec fetches a block volume plugin by spec. +func (pm *VolumePluginMgr) FindMapperPluginBySpec(spec *Spec) (BlockVolumePlugin, error) { + volumePlugin, err := pm.FindPluginBySpec(spec) + if err != nil { + return nil, err + } + + if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok { + return blockVolumePlugin, nil + } + return nil, nil +} + +// FindMapperPluginByName fetches a block volume plugin by name. +func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + + if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok { + return blockVolumePlugin, nil + } + return nil, nil +} + // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests // for emptiness. Most attributes of the template will be correct for most diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/error.go b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go new file mode 100644 index 00000000..2c965586 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go @@ -0,0 +1,41 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + k8stypes "k8s.io/apimachinery/pkg/types" +) + +// This error on attach indicates volume is attached to a different node +// than we expected. 
+type DanglingAttachError struct { + msg string + CurrentNode k8stypes.NodeName + DevicePath string +} + +func (err *DanglingAttachError) Error() string { + return err.msg +} + +func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error { + return &DanglingAttachError{ + msg: msg, + CurrentNode: node, + DevicePath: devicePath, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go new file mode 100644 index 00000000..84631545 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/core/v1" +) + +const ( + // Name of finalizer on PVCs that have a running pod. + PVCProtectionFinalizer = "kubernetes.io/pvc-protection" +) + +// IsPVCBeingDeleted returns: +// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set +// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil +func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool { + return pvc.ObjectMeta.DeletionTimestamp != nil +} + +// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is +// present among the pvc.Finalizers +func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool { + for _, finalizer := range pvc.Finalizers { + if finalizer == PVCProtectionFinalizer { + return true + } + } + return false +} + +// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case +// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not +// informer's cached copy.) +func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { + newFinalizers := make([]string, 0) + for _, finalizer := range pvc.Finalizers { + if finalizer != PVCProtectionFinalizer { + newFinalizers = append(newFinalizers, finalizer) + } + } + if len(newFinalizers) == 0 { + // Sanitize for unit tests so we don't need to distinguish empty array + // and nil. + newFinalizers = nil + } + pvc.Finalizers = newFinalizers +} + +// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that +// pvc is writable (i.e. is not informer's cached copy.) 
+func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { + pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index ad4ac6a0..106036df 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "os" "path" - + "path/filepath" "strings" "github.com/golang/glog" @@ -30,15 +30,23 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/api/legacyscheme" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" ) -const readyFileName = "ready" +const ( + readyFileName = "ready" + losetupPath = "losetup" + + ErrDeviceNotFound = "device not found" + ErrDeviceNotSupported = "device not supported" + ErrNotAvailable = "not available" +) // IsReady checks for the existence of a regular file // called 'ready' in the given directory and returns @@ -233,7 +241,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) { } pod := &v1.Pod{} - codec := legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion) + codec := legacyscheme.Codecs.UniversalDecoder() if err := runtime.DecodeInto(codec, podDef, pod); err != nil { return nil, fmt.Errorf("failed decoding file: %v", err) } @@ -270,3 +278,201 @@ func stringToSet(str, delimiter string) (sets.String, error) { } return zonesSet, nil } + +// BlockVolumePathHandler defines a set of operations for handling block volume-related operations +type BlockVolumePathHandler interface { + // MapDevice creates a symbolic link to block device under specified map path + MapDevice(devicePath string, mapPath string, linkName string) error + // UnmapDevice removes a symbolic link to block device under specified map path + UnmapDevice(mapPath string, linkName string) error + // RemovePath removes a file or directory on specified map path + RemoveMapPath(mapPath string) error + // IsSymlinkExist retruns true if specified symbolic link exists + IsSymlinkExist(mapPath string) (bool, error) + // GetDeviceSymlinkRefs searches symbolic links under global map path + GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) + // FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath + // corresponding to map path symlink, and then return global map path with pod uuid. + FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) + // AttachFileDevice takes a path to a regular file and makes it available as an + // attached block device. + AttachFileDevice(path string) (string, error) + // GetLoopDevice returns the full path to the loop device associated with the given path. + GetLoopDevice(path string) (string, error) + // RemoveLoopDevice removes specified loopback device + RemoveLoopDevice(device string) error +} + +// NewBlockVolumePathHandler returns a new instance of BlockVolumeHandler. 
+func NewBlockVolumePathHandler() BlockVolumePathHandler { + var volumePathHandler VolumePathHandler + return volumePathHandler +} + +// VolumePathHandler is path related operation handlers for block volume +type VolumePathHandler struct { +} + +// MapDevice creates a symbolic link to block device under specified map path +func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error { + // Example of global map path: + // globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid} + // linkName: {podUid} + // + // Example of pod device map path: + // podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + // linkName: {volumeName} + if len(devicePath) == 0 { + return fmt.Errorf("Failed to map device to map path. devicePath is empty") + } + if len(mapPath) == 0 { + return fmt.Errorf("Failed to map device to map path. mapPath is empty") + } + if !filepath.IsAbs(mapPath) { + return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) + } + glog.V(5).Infof("MapDevice: devicePath %s", devicePath) + glog.V(5).Infof("MapDevice: mapPath %s", mapPath) + glog.V(5).Infof("MapDevice: linkName %s", linkName) + + // Check and create mapPath + _, err := os.Stat(mapPath) + if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate map path: %s", mapPath) + return err + } + if err = os.MkdirAll(mapPath, 0750); err != nil { + return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err) + } + // Remove old symbolic link(or file) then create new one. + // This should be done because current symbolic link is + // stale accross node reboot. + linkPath := path.Join(mapPath, string(linkName)) + if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return err + } + err = os.Symlink(devicePath, linkPath) + return err +} + +// UnmapDevice removes a symbolic link associated to block device under specified map path +func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") + } + glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + + // Check symbolic link exists + linkPath := path.Join(mapPath, string(linkName)) + if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { + return checkErr + } else if !islinkExist { + glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + return nil + } + err := os.Remove(linkPath) + return err +} + +// RemoveMapPath removes a file or directory on specified map path +func (v VolumePathHandler) RemoveMapPath(mapPath string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to remove map path. mapPath is empty") + } + glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + err := os.RemoveAll(mapPath) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// IsSymlinkExist returns true if specified file exists and the type is symbolik link. +// If file doesn't exist, or file exists but not symbolick link, return false with no error. +// On other cases, return false with error from Lstat(). 
+func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) { + fi, err := os.Lstat(mapPath) + if err == nil { + // If file exits and it's symbolick link, return true and no error + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + return true, nil + } + // If file exits but it's not symbolick link, return fale and no error + return false, nil + } + // If file doesn't exist, return false and no error + if os.IsNotExist(err) { + return false, nil + } + // Return error from Lstat() + return false, err +} + +// GetDeviceSymlinkRefs searches symbolic links under global map path +func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) { + var refs []string + files, err := ioutil.ReadDir(mapPath) + if err != nil { + return nil, fmt.Errorf("Directory cannot read %v", err) + } + for _, file := range files { + if file.Mode()&os.ModeSymlink != os.ModeSymlink { + continue + } + filename := file.Name() + filepath, err := os.Readlink(path.Join(mapPath, filename)) + if err != nil { + return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) + } + glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + if filepath == devPath { + refs = append(refs, path.Join(mapPath, filename)) + } + } + glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + return refs, nil +} + +// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath +// corresponding to map path symlink, and then return global map path with pod uuid. +// ex. mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX +// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX +func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { + var globalMapPathUUID string + // Find symbolic link named pod uuid under plugin dir + err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { + glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + if res, err := compareSymlinks(path, mapPath); err == nil && res { + globalMapPathUUID = path + } + } + return nil + }) + if err != nil { + return "", err + } + glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + // Return path contains global map path + {pod uuid} + return globalMapPathUUID, nil +} + +func compareSymlinks(global, pod string) (bool, error) { + devGlobal, err := os.Readlink(global) + if err != nil { + return false, err + } + devPod, err := os.Readlink(pod) + if err != nil { + return false, err + } + glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + if devGlobal == devPod { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go new file mode 100644 index 00000000..755a10f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go @@ -0,0 +1,106 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/golang/glog" +) + +// AttachFileDevice takes a path to a regular file and makes it available as an +// attached block device. +func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { + blockDevicePath, err := v.GetLoopDevice(path) + if err != nil && err.Error() != ErrDeviceNotFound { + return "", err + } + + // If no existing loop device for the path, create one + if blockDevicePath == "" { + glog.V(4).Infof("Creating device for path: %s", path) + blockDevicePath, err = makeLoopDevice(path) + if err != nil { + return "", err + } + } + return blockDevicePath, nil +} + +// GetLoopDevice returns the full path to the loop device associated with the given path. +func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return "", errors.New(ErrNotAvailable) + } + if err != nil { + return "", fmt.Errorf("not attachable: %v", err) + } + + args := []string{"-j", path} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + glog.V(2).Infof("Failed device discover command for path %s: %v", path, err) + return "", err + } + return parseLosetupOutputForDevice(out) +} + +func makeLoopDevice(path string) (string, error) { + args := []string{"-f", "--show", path} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + glog.V(2).Infof("Failed device create command for path %s: %v", path, err) + return "", err + } + return parseLosetupOutputForDevice(out) +} + +// RemoveLoopDevice removes specified loopback device +func (v VolumePathHandler) RemoveLoopDevice(device string) error { + args := []string{"-d", device} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + if !strings.Contains(string(out), "No such device or address") { + return err + } + } + return nil +} + +func parseLosetupOutputForDevice(output []byte) (string, error) { + if len(output) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + + // losetup returns device in the format: + // /dev/loop1: [0073]:148662 (/dev/sda) + device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0]) + if len(device) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + return device, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go new file mode 100644 index 00000000..930e4f66 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go @@ -0,0 +1,39 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" +) + +// AttachFileDevice takes a path to a regular file and makes it available as an +// attached block device. +func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { + return "", fmt.Errorf("AttachFileDevice not supported for this build.") +} + +// GetLoopDevice returns the full path to the loop device associated with the given path. +func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { + return "", fmt.Errorf("GetLoopDevice not supported for this build.") +} + +// RemoveLoopDevice removes specified loopback device +func (v VolumePathHandler) RemoveLoopDevice(device string) error { + return fmt.Errorf("RemoveLoopDevice not supported for this build.") +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/vendor/k8s.io/kubernetes/pkg/volume/volume.go index 78cb21a3..47196355 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -37,6 +37,19 @@ type Volume interface { MetricsProvider } +// BlockVolume interface provides methods to generate global map path +// and pod device map path. +type BlockVolume interface { + // GetGlobalMapPath returns a global map path which contains + // symbolic links associated to a block device. + // ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} + GetGlobalMapPath(spec *Spec) (string, error) + // GetPodDeviceMapPath returns a pod device map path + // and name of a symbolic link associated to a block device. + // ex. pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + GetPodDeviceMapPath() (string, string) +} + // MetricsProvider exposes metrics (e.g. used,available space) related to a // Volume. type MetricsProvider interface { @@ -132,6 +145,34 @@ type Unmounter interface { TearDownAt(dir string) error } +// BlockVolumeMapper interface provides methods to set up/map the volume. +type BlockVolumeMapper interface { + BlockVolume + // SetUpDevice prepares the volume to a self-determined directory path, + // which may or may not exist yet and returns combination of physical + // device path of a block volume and error. + // If the plugin is non-attachable, it should prepare the device + // in /dev/ (or where appropriate) and return unique device path. + // Unique device path across kubelet node reboot is required to avoid + // unexpected block volume destruction. + // If the plugin is attachable, it should not do anything here, + // just return empty string for device path. + // Instead, attachable plugin have to return unique device path + // at attacher.Attach() and attacher.WaitForAttach(). + // This may be called more than once, so implementations must be idempotent. + SetUpDevice() (string, error) +} + +// BlockVolumeUnmapper interface provides methods to cleanup/unmap the volumes. +type BlockVolumeUnmapper interface { + BlockVolume + // TearDownDevice removes traces of the SetUpDevice procedure under + // a self-determined directory. 
+ // If the plugin is non-attachable, this method detaches the volume + // from a node. + TearDownDevice(mapPath string, devicePath string) error +} + // Provisioner is an interface that creates templates for PersistentVolumes // and can create the volume as a new resource in the infrastructure provider. type Provisioner interface { @@ -195,8 +236,10 @@ type BulkVolumeVerifier interface { // Detacher can detach a volume from a node. type Detacher interface { - // Detach the given device from the node with the given Name. - Detach(deviceName string, nodeName types.NodeName) error + // Detach the given volume from the node with the given Name. + // volumeName is name of the volume as returned from plugin's + // GetVolumeName(). + Detach(volumeName string, nodeName types.NodeName) error // UnmountDevice unmounts the global mount of the disk. This // should only be called once all bind mounts have been From f317ffce5bdcc960656aa9c11a82215265d15405 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Mon, 11 Dec 2017 16:45:48 +0100 Subject: [PATCH 03/59] vendor: bump to kube 1.10/master Signed-off-by: Antonio Murdaca --- contrib/test/integration/main.yml | 3 +- vendor.conf | 4 +- .../api/admissionregistration/v1alpha1/doc.go | 4 +- .../v1alpha1/generated.pb.go | 1284 +--- .../v1alpha1/generated.proto | 96 +- .../v1alpha1/register.go | 2 - .../admissionregistration/v1alpha1/types.go | 117 - .../v1alpha1/types_swagger_doc_generated.go | 62 - .../v1alpha1/zz_generated.deepcopy.go | 216 - .../api/admissionregistration/v1beta1/doc.go | 25 + .../v1beta1/generated.pb.go | 2150 +++++++ .../v1beta1/generated.proto | 261 + .../admissionregistration/v1beta1/register.go | 53 + .../admissionregistration/v1beta1/types.go | 281 + .../v1beta1/types_swagger_doc_generated.go | 125 + .../v1beta1/zz_generated.deepcopy.go | 321 + vendor/k8s.io/api/apps/v1/doc.go | 2 +- vendor/k8s.io/api/apps/v1/generated.pb.go | 5553 +++++++++++++++- vendor/k8s.io/api/apps/v1/generated.proto | 524 +- vendor/k8s.io/api/apps/v1/register.go | 10 +- vendor/k8s.io/api/apps/v1/types.go | 624 +- .../apps/v1/types_swagger_doc_generated.go | 267 +- .../api/apps/v1/zz_generated.deepcopy.go | 696 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 2 +- .../k8s.io/api/apps/v1beta1/generated.pb.go | 585 +- .../k8s.io/api/apps/v1beta1/generated.proto | 28 + vendor/k8s.io/api/apps/v1beta1/register.go | 2 +- vendor/k8s.io/api/apps/v1beta1/types.go | 26 + .../v1beta1/types_swagger_doc_generated.go | 14 + .../api/apps/v1beta1/zz_generated.deepcopy.go | 119 +- vendor/k8s.io/api/apps/v1beta2/doc.go | 2 +- .../k8s.io/api/apps/v1beta2/generated.pb.go | 1194 +++- .../k8s.io/api/apps/v1beta2/generated.proto | 76 +- vendor/k8s.io/api/apps/v1beta2/register.go | 2 +- vendor/k8s.io/api/apps/v1beta2/types.go | 82 +- .../v1beta2/types_swagger_doc_generated.go | 46 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 179 +- vendor/k8s.io/api/authentication/v1/doc.go | 2 +- .../api/authentication/v1/generated.proto | 1 + .../k8s.io/api/authentication/v1/register.go | 2 +- .../v1/zz_generated.deepcopy.go | 31 - .../k8s.io/api/authentication/v1beta1/doc.go | 2 +- .../authentication/v1beta1/generated.proto | 1 + .../api/authentication/v1beta1/register.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 31 - vendor/k8s.io/api/authorization/v1/doc.go | 2 +- .../api/authorization/v1/generated.pb.go | 172 +- .../api/authorization/v1/generated.proto | 10 +- .../k8s.io/api/authorization/v1/register.go | 2 +- vendor/k8s.io/api/authorization/v1/types.go | 8 +- 
.../v1/types_swagger_doc_generated.go | 3 +- .../authorization/v1/zz_generated.deepcopy.go | 67 - .../k8s.io/api/authorization/v1beta1/doc.go | 2 +- .../api/authorization/v1beta1/generated.pb.go | 177 +- .../api/authorization/v1beta1/generated.proto | 10 +- .../api/authorization/v1beta1/register.go | 2 +- .../k8s.io/api/authorization/v1beta1/types.go | 8 +- .../v1beta1/types_swagger_doc_generated.go | 3 +- .../v1beta1/zz_generated.deepcopy.go | 67 - vendor/k8s.io/api/autoscaling/v1/doc.go | 2 +- .../k8s.io/api/autoscaling/v1/generated.proto | 1 + vendor/k8s.io/api/autoscaling/v1/register.go | 2 +- .../autoscaling/v1/zz_generated.deepcopy.go | 83 - vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 2 +- .../api/autoscaling/v2beta1/generated.proto | 1 + .../api/autoscaling/v2beta1/register.go | 2 +- .../v2beta1/zz_generated.deepcopy.go | 71 - vendor/k8s.io/api/batch/v1/doc.go | 2 +- vendor/k8s.io/api/batch/v1/generated.proto | 1 + vendor/k8s.io/api/batch/v1/register.go | 2 +- .../api/batch/v1/zz_generated.deepcopy.go | 35 - vendor/k8s.io/api/batch/v1beta1/doc.go | 2 +- .../k8s.io/api/batch/v1beta1/generated.proto | 6 +- vendor/k8s.io/api/batch/v1beta1/register.go | 2 +- vendor/k8s.io/api/batch/v1beta1/types.go | 5 +- .../v1beta1/types_swagger_doc_generated.go | 2 +- .../batch/v1beta1/zz_generated.deepcopy.go | 39 - vendor/k8s.io/api/batch/v2alpha1/doc.go | 2 +- .../k8s.io/api/batch/v2alpha1/generated.proto | 6 +- vendor/k8s.io/api/batch/v2alpha1/register.go | 2 +- vendor/k8s.io/api/batch/v2alpha1/types.go | 5 +- .../v2alpha1/types_swagger_doc_generated.go | 2 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 39 - vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 +- .../api/certificates/v1beta1/generated.proto | 1 + .../api/certificates/v1beta1/register.go | 2 +- .../v1beta1/zz_generated.deepcopy.go | 35 - .../api/core/v1/annotation_key_constants.go | 6 - vendor/k8s.io/api/core/v1/doc.go | 2 +- vendor/k8s.io/api/core/v1/generated.pb.go | 4741 ++++++++++---- vendor/k8s.io/api/core/v1/generated.proto | 259 +- vendor/k8s.io/api/core/v1/register.go | 2 +- vendor/k8s.io/api/core/v1/types.go | 235 +- .../core/v1/types_swagger_doc_generated.go | 126 +- .../api/core/v1/zz_generated.deepcopy.go | 943 +-- .../events/v1beta1}/doc.go | 8 +- .../k8s.io/api/events/v1beta1/generated.pb.go | 1306 ++++ .../k8s.io/api/events/v1beta1/generated.proto | 122 + vendor/k8s.io/api/events/v1beta1/register.go | 53 + vendor/k8s.io/api/events/v1beta1/types.go | 122 + .../v1beta1/types_swagger_doc_generated.go | 73 + .../events/v1beta1/zz_generated.deepcopy.go | 127 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 2 +- .../api/extensions/v1beta1/generated.pb.go | 2071 +++--- .../api/extensions/v1beta1/generated.proto | 104 +- .../k8s.io/api/extensions/v1beta1/register.go | 6 +- vendor/k8s.io/api/extensions/v1beta1/types.go | 123 +- .../v1beta1/types_swagger_doc_generated.go | 81 +- .../v1beta1/zz_generated.deepcopy.go | 428 +- vendor/k8s.io/api/networking/v1/doc.go | 2 +- .../k8s.io/api/networking/v1/generated.proto | 1 + vendor/k8s.io/api/networking/v1/register.go | 2 +- .../networking/v1/zz_generated.deepcopy.go | 47 - vendor/k8s.io/api/policy/v1beta1/doc.go | 2 +- .../k8s.io/api/policy/v1beta1/generated.proto | 1 + vendor/k8s.io/api/policy/v1beta1/register.go | 2 +- .../policy/v1beta1/zz_generated.deepcopy.go | 35 - vendor/k8s.io/api/rbac/v1/doc.go | 2 +- vendor/k8s.io/api/rbac/v1/generated.pb.go | 348 +- vendor/k8s.io/api/rbac/v1/generated.proto | 16 + vendor/k8s.io/api/rbac/v1/register.go | 2 +- 
vendor/k8s.io/api/rbac/v1/types.go | 14 + .../rbac/v1/types_swagger_doc_generated.go | 16 +- .../api/rbac/v1/zz_generated.deepcopy.go | 84 +- vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 +- .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 350 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 15 + vendor/k8s.io/api/rbac/v1alpha1/register.go | 2 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 14 + .../v1alpha1/types_swagger_doc_generated.go | 16 +- .../rbac/v1alpha1/zz_generated.deepcopy.go | 84 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 +- .../k8s.io/api/rbac/v1beta1/generated.pb.go | 349 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 16 + vendor/k8s.io/api/rbac/v1beta1/register.go | 2 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 13 + .../v1beta1/types_swagger_doc_generated.go | 16 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 84 +- vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 +- .../api/scheduling/v1alpha1/generated.proto | 1 + .../api/scheduling/v1alpha1/register.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 23 - vendor/k8s.io/api/settings/v1alpha1/doc.go | 2 +- .../api/settings/v1alpha1/generated.proto | 1 + .../k8s.io/api/settings/v1alpha1/register.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 27 - vendor/k8s.io/api/storage/v1/doc.go | 2 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 120 +- vendor/k8s.io/api/storage/v1/generated.proto | 10 +- vendor/k8s.io/api/storage/v1/register.go | 2 +- vendor/k8s.io/api/storage/v1/types.go | 22 + .../storage/v1/types_swagger_doc_generated.go | 1 + .../api/storage/v1/zz_generated.deepcopy.go | 32 +- vendor/k8s.io/api/storage/v1alpha1/doc.go | 20 + .../api/storage/v1alpha1/generated.pb.go | 1523 +++++ .../api/storage/v1alpha1/generated.proto | 128 + .../k8s.io/api/storage/v1alpha1/register.go | 50 + vendor/k8s.io/api/storage/v1alpha1/types.go | 126 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../storage/v1alpha1/zz_generated.deepcopy.go | 188 + vendor/k8s.io/api/storage/v1beta1/doc.go | 2 +- .../api/storage/v1beta1/generated.pb.go | 119 +- .../api/storage/v1beta1/generated.proto | 10 +- vendor/k8s.io/api/storage/v1beta1/register.go | 2 +- vendor/k8s.io/api/storage/v1beta1/types.go | 22 + .../v1beta1/types_swagger_doc_generated.go | 1 + .../storage/v1beta1/zz_generated.deepcopy.go | 32 +- .../k8s.io/apiextensions-apiserver/README.md | 2 +- .../pkg/features/kube_features.go | 3 +- .../apimachinery/pkg/api/errors/errors.go | 33 +- .../apimachinery/pkg/api/meta/interfaces.go | 3 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 121 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 19 +- .../apimachinery/pkg/api/meta/unstructured.go | 18 +- .../pkg/api/resource/zz_generated.deepcopy.go | 17 - .../pkg/apimachinery/registered/registered.go | 2 +- .../apimachinery/pkg/apimachinery/types.go | 2 +- .../internalversion/zz_generated.deepcopy.go | 18 - .../pkg/apis/meta/v1/conversion.go | 7 + .../pkg/apis/meta/v1/generated.pb.go | 378 +- .../pkg/apis/meta/v1/generated.proto | 31 +- .../apimachinery/pkg/apis/meta/v1/register.go | 2 - .../apimachinery/pkg/apis/meta/v1/types.go | 23 +- .../meta/v1/types_swagger_doc_generated.go | 4 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 453 ++ .../apis/meta/v1/unstructured/unstructured.go | 607 +- .../meta/v1/unstructured/unstructured_list.go | 189 + .../v1/unstructured/zz_generated.deepcopy.go | 18 - .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 154 - .../meta/v1alpha1/zz_generated.deepcopy.go | 38 - .../apimachinery/pkg/conversion/cloner.go | 249 - .../pkg/labels/zz_generated.deepcopy.go | 17 - 
.../k8s.io/apimachinery/pkg/runtime/codec.go | 16 +- .../unstructured => runtime}/converter.go | 65 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 4 +- .../apimachinery/pkg/runtime/interfaces.go | 6 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 180 +- .../pkg/runtime/serializer/json/json.go | 3 +- .../pkg/runtime/zz_generated.deepcopy.go | 25 - .../pkg/util/mergepatch/errors.go | 1 + .../k8s.io/apimachinery/pkg/util/net/http.go | 12 +- .../pkg/util/strategicpatch/errors.go | 49 + .../pkg/util/strategicpatch/meta.go | 194 + .../pkg/util/strategicpatch/patch.go | 320 +- .../pkg/util/strategicpatch/types.go | 193 + .../pkg/watch/zz_generated.deepcopy.go | 17 - .../third_party/forked/golang/json/fields.go | 9 +- .../client-go/discovery/unstructured.go | 66 +- .../admissionregistration/interface.go | 54 + .../v1alpha1/initializerconfiguration.go | 87 + .../v1alpha1/interface.go | 45 + .../v1beta1/interface.go | 52 + .../v1beta1/mutatingwebhookconfiguration.go | 87 + .../v1beta1/validatingwebhookconfiguration.go | 87 + .../client-go/informers/apps/interface.go | 62 + .../informers/apps/v1/controllerrevision.go | 88 + .../client-go/informers/apps/v1/daemonset.go | 88 + .../client-go/informers/apps/v1/deployment.go | 88 + .../client-go/informers/apps/v1/interface.go | 73 + .../client-go/informers/apps/v1/replicaset.go | 88 + .../informers/apps/v1/statefulset.go | 88 + .../apps/v1beta1/controllerrevision.go | 88 + .../informers/apps/v1beta1/deployment.go | 88 + .../informers/apps/v1beta1/interface.go | 59 + .../informers/apps/v1beta1/statefulset.go | 88 + .../apps/v1beta2/controllerrevision.go | 88 + .../informers/apps/v1beta2/daemonset.go | 88 + .../informers/apps/v1beta2/deployment.go | 88 + .../informers/apps/v1beta2/interface.go | 73 + .../informers/apps/v1beta2/replicaset.go | 88 + .../informers/apps/v1beta2/statefulset.go | 88 + .../informers/autoscaling/interface.go | 54 + .../autoscaling/v1/horizontalpodautoscaler.go | 88 + .../informers/autoscaling/v1/interface.go | 45 + .../v2beta1/horizontalpodautoscaler.go | 88 + .../autoscaling/v2beta1/interface.go | 45 + .../client-go/informers/batch/interface.go | 62 + .../client-go/informers/batch/v1/interface.go | 45 + .../client-go/informers/batch/v1/job.go | 88 + .../informers/batch/v1beta1/cronjob.go | 88 + .../informers/batch/v1beta1/interface.go | 45 + .../informers/batch/v2alpha1/cronjob.go | 88 + .../informers/batch/v2alpha1/interface.go | 45 + .../informers/certificates/interface.go | 46 + .../v1beta1/certificatesigningrequest.go | 87 + .../certificates/v1beta1/interface.go | 45 + .../client-go/informers/core/interface.go | 46 + .../informers/core/v1/componentstatus.go | 87 + .../client-go/informers/core/v1/configmap.go | 88 + .../client-go/informers/core/v1/endpoints.go | 88 + .../client-go/informers/core/v1/event.go | 88 + .../client-go/informers/core/v1/interface.go | 150 + .../client-go/informers/core/v1/limitrange.go | 88 + .../client-go/informers/core/v1/namespace.go | 87 + .../client-go/informers/core/v1/node.go | 87 + .../informers/core/v1/persistentvolume.go | 87 + .../core/v1/persistentvolumeclaim.go | 88 + .../k8s.io/client-go/informers/core/v1/pod.go | 88 + .../informers/core/v1/podtemplate.go | 88 + .../core/v1/replicationcontroller.go | 88 + .../informers/core/v1/resourcequota.go | 88 + .../client-go/informers/core/v1/secret.go | 88 + .../client-go/informers/core/v1/service.go | 88 + .../informers/core/v1/serviceaccount.go | 88 + .../client-go/informers/events/interface.go | 46 + .../informers/events/v1beta1/event.go | 
88 + .../informers/events/v1beta1/interface.go | 45 + .../informers/extensions/interface.go | 46 + .../informers/extensions/v1beta1/daemonset.go | 88 + .../extensions/v1beta1/deployment.go | 88 + .../informers/extensions/v1beta1/ingress.go | 88 + .../informers/extensions/v1beta1/interface.go | 73 + .../extensions/v1beta1/podsecuritypolicy.go | 87 + .../extensions/v1beta1/replicaset.go | 88 + vendor/k8s.io/client-go/informers/factory.go | 208 + vendor/k8s.io/client-go/informers/generic.go | 254 + .../internalinterfaces/factory_interfaces.go | 37 + .../informers/networking/interface.go | 46 + .../informers/networking/v1/interface.go | 45 + .../informers/networking/v1/networkpolicy.go | 88 + .../client-go/informers/policy/interface.go | 46 + .../informers/policy/v1beta1/interface.go | 45 + .../policy/v1beta1/poddisruptionbudget.go | 88 + .../client-go/informers/rbac/interface.go | 62 + .../informers/rbac/v1/clusterrole.go | 87 + .../informers/rbac/v1/clusterrolebinding.go | 87 + .../client-go/informers/rbac/v1/interface.go | 66 + .../client-go/informers/rbac/v1/role.go | 88 + .../informers/rbac/v1/rolebinding.go | 88 + .../informers/rbac/v1alpha1/clusterrole.go | 87 + .../rbac/v1alpha1/clusterrolebinding.go | 87 + .../informers/rbac/v1alpha1/interface.go | 66 + .../client-go/informers/rbac/v1alpha1/role.go | 88 + .../informers/rbac/v1alpha1/rolebinding.go | 88 + .../informers/rbac/v1beta1/clusterrole.go | 87 + .../rbac/v1beta1/clusterrolebinding.go | 87 + .../informers/rbac/v1beta1/interface.go | 66 + .../client-go/informers/rbac/v1beta1/role.go | 88 + .../informers/rbac/v1beta1/rolebinding.go | 88 + .../informers/scheduling/interface.go | 46 + .../scheduling/v1alpha1/interface.go | 45 + .../scheduling/v1alpha1/priorityclass.go | 87 + .../client-go/informers/settings/interface.go | 46 + .../informers/settings/v1alpha1/interface.go | 45 + .../informers/settings/v1alpha1/podpreset.go | 88 + .../client-go/informers/storage/interface.go | 62 + .../informers/storage/v1/interface.go | 45 + .../informers/storage/v1/storageclass.go | 87 + .../informers/storage/v1alpha1/interface.go | 45 + .../storage/v1alpha1/volumeattachment.go | 87 + .../informers/storage/v1beta1/interface.go | 45 + .../informers/storage/v1beta1/storageclass.go | 87 + .../k8s.io/client-go/kubernetes/clientset.go | 56 +- .../client-go/kubernetes/scheme/register.go | 6 + .../v1alpha1/admissionregistration_client.go | 5 - .../externaladmissionhookconfiguration.go | 145 - .../v1alpha1/generated_expansion.go | 2 - .../v1beta1/admissionregistration_client.go | 93 + .../admissionregistration/v1beta1/doc.go | 18 + .../v1beta1/generated_expansion.go | 21 + .../v1beta1/mutatingwebhookconfiguration.go | 145 + .../v1beta1/validatingwebhookconfiguration.go | 145 + .../kubernetes/typed/apps/v1/apps_client.go | 20 + .../typed/apps/v1/controllerrevision.go | 155 + .../kubernetes/typed/apps/v1/deployment.go | 172 + .../typed/apps/v1/generated_expansion.go | 8 + .../kubernetes/typed/apps/v1/replicaset.go | 172 + .../kubernetes/typed/apps/v1/statefulset.go | 172 + .../kubernetes/typed/events/v1beta1/doc.go | 18 + .../kubernetes/typed/events/v1beta1/event.go | 155 + .../typed/events/v1beta1/events_client.go | 88 + .../events/v1beta1/generated_expansion.go | 19 + .../extensions/v1beta1/extensions_client.go | 5 - .../extensions/v1beta1/generated_expansion.go | 2 - .../extensions/v1beta1/thirdpartyresource.go | 145 - .../kubernetes/typed/storage/v1alpha1/doc.go | 18 + .../storage/v1alpha1/generated_expansion.go | 19 + 
.../typed/storage/v1alpha1/storage_client.go | 88 + .../storage/v1alpha1/volumeattachment.go | 161 + .../v1alpha1/expansion_generated.go | 23 + .../v1alpha1/initializerconfiguration.go | 65 + .../v1beta1/expansion_generated.go | 27 + .../v1beta1/mutatingwebhookconfiguration.go | 65 + .../v1beta1/validatingwebhookconfiguration.go | 65 + .../listers/apps/v1/controllerrevision.go | 94 + .../client-go/listers/apps/v1/daemonset.go | 94 + .../listers/apps/v1/daemonset_expansion.go | 113 + .../client-go/listers/apps/v1/deployment.go | 94 + .../listers/apps/v1/deployment_expansion.go | 70 + .../listers/apps/v1/expansion_generated.go | 27 + .../client-go/listers/apps/v1/replicaset.go | 94 + .../listers/apps/v1/replicaset_expansion.go | 73 + .../client-go/listers/apps/v1/statefulset.go | 94 + .../listers/apps/v1/statefulset_expansion.go | 77 + .../apps/v1beta1/controllerrevision.go | 94 + .../listers/apps/v1beta1/deployment.go | 94 + .../apps/v1beta1/expansion_generated.go | 43 + .../client-go/listers/apps/v1beta1/scale.go | 94 + .../listers/apps/v1beta1/statefulset.go | 94 + .../apps/v1beta1/statefulset_expansion.go | 77 + .../apps/v1beta2/controllerrevision.go | 94 + .../listers/apps/v1beta2/daemonset.go | 94 + .../apps/v1beta2/daemonset_expansion.go | 113 + .../listers/apps/v1beta2/deployment.go | 94 + .../apps/v1beta2/deployment_expansion.go | 70 + .../apps/v1beta2/expansion_generated.go | 35 + .../listers/apps/v1beta2/replicaset.go | 94 + .../apps/v1beta2/replicaset_expansion.go | 73 + .../client-go/listers/apps/v1beta2/scale.go | 94 + .../listers/apps/v1beta2/statefulset.go | 94 + .../apps/v1beta2/statefulset_expansion.go | 77 + .../autoscaling/v1/expansion_generated.go | 27 + .../autoscaling/v1/horizontalpodautoscaler.go | 94 + .../v2beta1/expansion_generated.go | 27 + .../v2beta1/horizontalpodautoscaler.go | 94 + .../listers/batch/v1/expansion_generated.go | 19 + .../k8s.io/client-go/listers/batch/v1/job.go | 94 + .../listers/batch/v1/job_expansion.go | 68 + .../listers/batch/v1beta1/cronjob.go | 94 + .../batch/v1beta1/expansion_generated.go | 27 + .../listers/batch/v2alpha1/cronjob.go | 94 + .../batch/v2alpha1/expansion_generated.go | 27 + .../v1beta1/certificatesigningrequest.go | 65 + .../v1beta1/expansion_generated.go | 23 + .../listers/core/v1/componentstatus.go | 65 + .../client-go/listers/core/v1/configmap.go | 94 + .../client-go/listers/core/v1/endpoints.go | 94 + .../k8s.io/client-go/listers/core/v1/event.go | 94 + .../listers/core/v1/expansion_generated.go | 111 + .../client-go/listers/core/v1/limitrange.go | 94 + .../client-go/listers/core/v1/namespace.go | 65 + .../k8s.io/client-go/listers/core/v1/node.go | 65 + .../listers/core/v1/node_expansion.go | 48 + .../listers/core/v1/persistentvolume.go | 65 + .../listers/core/v1/persistentvolumeclaim.go | 94 + .../k8s.io/client-go/listers/core/v1/pod.go | 94 + .../client-go/listers/core/v1/podtemplate.go | 94 + .../listers/core/v1/replicationcontroller.go | 94 + .../v1/replicationcontroller_expansion.go | 66 + .../listers/core/v1/resourcequota.go | 94 + .../client-go/listers/core/v1/secret.go | 94 + .../client-go/listers/core/v1/service.go | 94 + .../listers/core/v1/service_expansion.go | 56 + .../listers/core/v1/serviceaccount.go | 94 + .../client-go/listers/events/v1beta1/event.go | 94 + .../events/v1beta1/expansion_generated.go | 27 + .../listers/extensions/v1beta1/daemonset.go | 94 + .../extensions/v1beta1/daemonset_expansion.go | 114 + .../listers/extensions/v1beta1/deployment.go | 94 + .../v1beta1/deployment_expansion.go | 70 + 
.../extensions/v1beta1/expansion_generated.go | 39 + .../listers/extensions/v1beta1/ingress.go | 94 + .../extensions/v1beta1/podsecuritypolicy.go | 65 + .../listers/extensions/v1beta1/replicaset.go | 94 + .../v1beta1/replicaset_expansion.go | 73 + .../listers/extensions/v1beta1/scale.go | 94 + .../networking/v1/expansion_generated.go | 27 + .../listers/networking/v1/networkpolicy.go | 94 + .../listers/policy/v1beta1/eviction.go | 94 + .../policy/v1beta1/expansion_generated.go | 27 + .../policy/v1beta1/poddisruptionbudget.go | 94 + .../v1beta1/poddisruptionbudget_expansion.go | 74 + .../client-go/listers/rbac/v1/clusterrole.go | 65 + .../listers/rbac/v1/clusterrolebinding.go | 65 + .../listers/rbac/v1/expansion_generated.go | 43 + .../k8s.io/client-go/listers/rbac/v1/role.go | 94 + .../client-go/listers/rbac/v1/rolebinding.go | 94 + .../listers/rbac/v1alpha1/clusterrole.go | 65 + .../rbac/v1alpha1/clusterrolebinding.go | 65 + .../rbac/v1alpha1/expansion_generated.go | 43 + .../client-go/listers/rbac/v1alpha1/role.go | 94 + .../listers/rbac/v1alpha1/rolebinding.go | 94 + .../listers/rbac/v1beta1/clusterrole.go | 65 + .../rbac/v1beta1/clusterrolebinding.go | 65 + .../rbac/v1beta1/expansion_generated.go | 43 + .../client-go/listers/rbac/v1beta1/role.go | 94 + .../listers/rbac/v1beta1/rolebinding.go | 94 + .../v1alpha1/expansion_generated.go | 23 + .../scheduling/v1alpha1/priorityclass.go | 65 + .../settings/v1alpha1/expansion_generated.go | 27 + .../listers/settings/v1alpha1/podpreset.go | 94 + .../listers/storage/v1/expansion_generated.go | 23 + .../listers/storage/v1/storageclass.go | 65 + .../storage/v1alpha1/expansion_generated.go | 23 + .../storage/v1alpha1/volumeattachment.go | 65 + .../storage/v1beta1/expansion_generated.go | 23 + .../listers/storage/v1beta1/storageclass.go | 65 + vendor/k8s.io/client-go/pkg/version/base.go | 8 +- vendor/k8s.io/client-go/rest/request.go | 20 + .../client-go/rest/zz_generated.deepcopy.go | 17 - .../k8s.io/client-go/tools/cache/reflector.go | 13 +- .../client-go/tools/clientcmd/api/doc.go | 2 +- .../clientcmd/api/zz_generated.deepcopy.go | 39 - .../tools/remotecommand/remotecommand.go | 26 +- vendor/k8s.io/client-go/transport/cache.go | 2 +- vendor/k8s.io/kube-openapi/README.md | 18 +- .../k8s.io/kube-openapi/pkg/util/proto/doc.go | 19 + .../kube-openapi/pkg/util/proto/document.go | 275 + .../kube-openapi/pkg/util/proto/openapi.go | 251 + .../k8s.io/kubernetes/pkg/api/service/util.go | 5 +- .../pkg/api/v1/zz_generated.conversion.go | 5430 ---------------- .../kubernetes/pkg/api/validation/events.go | 84 - .../pkg/apis/autoscaling/annotations.go | 34 + .../kubernetes/pkg/apis/autoscaling/doc.go | 19 + .../pkg/apis/autoscaling/register.go | 53 + .../kubernetes/pkg/apis/autoscaling/types.go | 363 ++ .../apis/autoscaling/zz_generated.deepcopy.go | 481 ++ .../core}/annotation_key_constants.go | 6 +- .../kubernetes/pkg/{api => apis/core}/doc.go | 4 +- .../pkg/{api => apis/core}/field_constants.go | 6 +- .../pkg/{api => apis/core}/helper/helpers.go | 341 +- .../pkg/{api => apis/core}/install/install.go | 11 +- .../kubernetes/pkg/{api => apis/core}/json.go | 2 +- .../pkg/{api => apis/core}/objectreference.go | 2 +- .../kubernetes/pkg/apis/core/pods/helpers.go | 63 + .../pkg/{api => apis/core}/register.go | 2 +- .../pkg/{api => apis/core}/resource.go | 2 +- .../pkg/{api => apis/core}/taint.go | 2 +- .../pkg/{api => apis/core}/toleration.go | 2 +- .../pkg/{api => apis/core}/types.go | 225 +- .../pkg/{api => apis/core}/v1/conversion.go | 151 +- .../pkg/{api => 
apis/core}/v1/defaults.go | 23 +- .../pkg/{api => apis/core}/v1/doc.go | 8 +- .../{api => apis/core}/v1/helper/helpers.go | 23 +- .../pkg/{api => apis/core}/v1/register.go | 0 .../apis/core/v1/zz_generated.conversion.go | 5620 +++++++++++++++++ .../core}/v1/zz_generated.defaults.go | 12 +- .../pkg/{api => apis/core}/validation/doc.go | 2 +- .../pkg/apis/core/validation/events.go | 129 + .../core}/validation/validation.go | 1127 ++-- .../core}/zz_generated.deepcopy.go | 945 +-- .../kubernetes/pkg/apis/extensions/doc.go | 2 +- .../pkg/apis/extensions/register.go | 7 +- .../kubernetes/pkg/apis/extensions/types.go | 155 +- .../apis/extensions/zz_generated.deepcopy.go | 480 +- .../kubernetes/pkg/apis/networking/doc.go | 2 +- .../kubernetes/pkg/apis/networking/types.go | 2 +- .../apis/networking/zz_generated.deepcopy.go | 51 +- .../kubernetes/pkg/cloudprovider/README.md | 4 +- .../kubernetes/pkg/cloudprovider/cloud.go | 6 + .../pkg/controller/client_builder.go | 19 +- .../pkg/controller/controller_utils.go | 8 +- .../kubernetes/pkg/features/kube_features.go | 68 +- vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go | 19 + .../kubernetes/pkg/fieldpath/fieldpath.go | 103 + .../apis/cri/v1alpha1/runtime/constants.go | 28 + .../pkg/kubelet/container/helpers.go | 4 +- .../pkg/kubelet/container/runtime.go | 9 +- .../kubelet/network/hostport/fake_iptables.go | 24 +- .../network/hostport/hostport_manager.go | 13 + .../network/hostport/hostport_syncer.go | 3 +- .../kubelet/server/portforward/httpstream.go | 2 +- .../kubelet/server/portforward/websocket.go | 2 +- .../server/remotecommand/httpstream.go | 2 +- .../pkg/kubelet/types/pod_update.go | 4 +- .../pkg/proxy/healthcheck/healthcheck.go | 2 +- .../kubernetes/pkg/proxy/iptables/proxier.go | 125 +- .../kubernetes/pkg/proxy/util/conntrack.go | 8 +- .../kubernetes/pkg/proxy/util/endpoints.go | 26 +- .../k8s.io/kubernetes/pkg/proxy/util/utils.go | 4 +- .../kubernetes/pkg/serviceaccount/util.go | 2 +- .../kubernetes/pkg/util/iptables/iptables.go | 4 +- .../kubernetes/pkg/util/mount/exec_mount.go | 140 + .../pkg/util/mount/exec_mount_unsupported.go | 87 + .../kubernetes/pkg/util/mount/mount_linux.go | 6 +- .../pkg/util/mount/mount_unsupported.go | 2 +- .../kubernetes/pkg/util/pointer/pointer.go | 6 + .../kubernetes/pkg/util/taints/taints.go | 4 +- .../k8s.io/kubernetes/pkg/volume/plugins.go | 61 + .../kubernetes/pkg/volume/util/error.go | 41 + .../kubernetes/pkg/volume/util/finalizer.go | 68 + .../k8s.io/kubernetes/pkg/volume/util/util.go | 214 +- .../kubernetes/pkg/volume/util/util_linux.go | 106 + .../pkg/volume/util/util_unsupported.go | 39 + vendor/k8s.io/kubernetes/pkg/volume/volume.go | 47 +- 535 files changed, 52955 insertions(+), 17528 deletions(-) create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go rename vendor/k8s.io/{apimachinery/pkg/conversion/unstructured => api/events/v1beta1}/doc.go (75%) create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto 
create mode 100644 vendor/k8s.io/api/events/v1beta1/register.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/cloner.go rename vendor/k8s.io/apimachinery/pkg/{conversion/unstructured => runtime}/converter.go (90%) create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/informers/apps/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/interface.go create mode 100644 
vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1/job.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go create mode 100644 vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/interface.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/core/interface.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/componentstatus.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/configmap.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/endpoints.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/event.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/limitrange.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/namespace.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/node.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/pod.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/podtemplate.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/resourcequota.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/secret.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/service.go create mode 100644 vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go create mode 100644 vendor/k8s.io/client-go/informers/events/interface.go create mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/informers/events/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go create mode 100644 vendor/k8s.io/client-go/informers/factory.go create mode 100644 vendor/k8s.io/client-go/informers/generic.go create mode 100644 vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go 
create mode 100644 vendor/k8s.io/client-go/informers/networking/interface.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/informers/policy/interface.go create mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go create mode 100644 vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/interface.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/informers/settings/interface.go create mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/informers/storage/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1/storageclass.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go create mode 100644 vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go create mode 100644 
vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go create mode 100644 vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1/job.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go create mode 100644 vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/componentstatus.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/configmap.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/endpoints.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/event.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/limitrange.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/namespace.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/node.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/node_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/pod.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/podtemplate.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/resourcequota.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/secret.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/service.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/service_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go create mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go create 
mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go create mode 100644 vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1/storageclass.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go create mode 100644 vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/document.go create mode 100644 vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go delete mode 100644 vendor/k8s.io/kubernetes/pkg/api/validation/events.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go create 
mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/annotation_key_constants.go (95%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/doc.go (91%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/field_constants.go (92%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/helper/helpers.go (58%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/install/install.go (89%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/json.go (98%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/objectreference.go (98%) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/register.go (99%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/resource.go (99%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/taint.go (98%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/toleration.go (98%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/types.go (95%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/conversion.go (72%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/defaults.go (94%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/doc.go (73%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/helper/helpers.go (94%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/register.go (100%) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/v1/zz_generated.defaults.go (97%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/validation/doc.go (90%) create mode 100644 vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/validation/validation.go (79%) rename vendor/k8s.io/kubernetes/pkg/{api => apis/core}/zz_generated.deepcopy.go (77%) create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go create mode 100644 vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go create mode 100644 vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/error.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go create mode 100644 vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go diff --git a/contrib/test/integration/main.yml b/contrib/test/integration/main.yml index b2cc9abe..722290aa 100644 --- a/contrib/test/integration/main.yml +++ b/contrib/test/integration/main.yml @@ -82,7 +82,8 @@ include: "build/kubernetes.yml" vars: force_clone: True - k8s_git_version: "release-1.9" + # master as of 12/11/2017 + k8s_git_version: "a48f11c2257d84b0bec89864025508b0ef626b4f" k8s_github_fork: "kubernetes" crio_socket: "/var/run/crio/crio.sock" - name: run k8s e2e tests diff --git a/vendor.conf b/vendor.conf index e534c48e..5c837b12 100644 --- a/vendor.conf +++ b/vendor.conf @@ -1,10 +1,10 @@ -k8s.io/kubernetes v1.9.0-alpha.2 https://github.com/kubernetes/kubernetes +k8s.io/kubernetes a48f11c2257d84b0bec89864025508b0ef626b4f https://github.com/kubernetes/kubernetes 
k8s.io/client-go master https://github.com/kubernetes/client-go k8s.io/apimachinery master https://github.com/kubernetes/apimachinery k8s.io/apiserver master https://github.com/kubernetes/apiserver k8s.io/utils 4fe312863be2155a7b68acd2aff1c9221b24e68c https://github.com/kubernetes/utils k8s.io/api master https://github.com/kubernetes/api -k8s.io/kube-openapi abfc5fbe1cf87ee697db107fdfd24c32fe4397a8 https://github.com/kubernetes/kube-openapi +k8s.io/kube-openapi 39a7bf85c140f972372c2a0d1ee40adbf0c8bfe1 https://github.com/kubernetes/kube-openapi k8s.io/apiextensions-apiserver master https://github.com/kubernetes/apiextensions-apiserver # github.com/googleapis/gnostic 0c5108395e2debce0d731cf0287ddf7242066aba diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index 49e0e098..8a5d1fbb 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -14,12 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // Package v1alpha1 is the v1alpha1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and ExternalAdmissionHookConfiguration is for the +// InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. // +groupName=admissionregistration.k8s.io package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go index 4c66de14..bdc26637 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -25,16 +25,10 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto It has these top-level messages: - AdmissionHookClientConfig - ExternalAdmissionHook - ExternalAdmissionHookConfiguration - ExternalAdmissionHookConfigurationList Initializer InitializerConfiguration InitializerConfigurationList Rule - RuleWithOperations - ServiceReference */ package v1alpha1 @@ -58,230 +52,32 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AdmissionHookClientConfig) Reset() { *m = AdmissionHookClientConfig{} } -func (*AdmissionHookClientConfig) ProtoMessage() {} -func (*AdmissionHookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *ExternalAdmissionHook) Reset() { *m = ExternalAdmissionHook{} } -func (*ExternalAdmissionHook) ProtoMessage() {} -func (*ExternalAdmissionHook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ExternalAdmissionHookConfiguration) Reset() { *m = ExternalAdmissionHookConfiguration{} } -func (*ExternalAdmissionHookConfiguration) ProtoMessage() {} -func (*ExternalAdmissionHookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *ExternalAdmissionHookConfigurationList) Reset() { - *m = ExternalAdmissionHookConfigurationList{} -} -func (*ExternalAdmissionHookConfigurationList) ProtoMessage() {} -func (*ExternalAdmissionHookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - func (m *Initializer) Reset() { *m = Initializer{} } func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } func (*InitializerConfiguration) ProtoMessage() {} func (*InitializerConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} + return fileDescriptorGenerated, []int{1} } func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } func (*InitializerConfigurationList) ProtoMessage() {} func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} + return fileDescriptorGenerated, []int{2} } func (m *Rule) Reset() { *m = Rule{} } func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func init() { - proto.RegisterType((*AdmissionHookClientConfig)(nil), "k8s.io.api.admissionregistration.v1alpha1.AdmissionHookClientConfig") - proto.RegisterType((*ExternalAdmissionHook)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHook") - proto.RegisterType((*ExternalAdmissionHookConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfiguration") - proto.RegisterType((*ExternalAdmissionHookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.ExternalAdmissionHookConfigurationList") proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") proto.RegisterType((*InitializerConfigurationList)(nil), 
"k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") - proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1alpha1.RuleWithOperations") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1alpha1.ServiceReference") } -func (m *AdmissionHookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AdmissionHookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) - n1, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.CABundle != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.URLPath))) - i += copy(dAtA[i:], m.URLPath) - return i, nil -} - -func (m *ExternalAdmissionHook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHook) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n2, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - return i, nil -} - -func (m *ExternalAdmissionHookConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.ExternalAdmissionHooks) > 0 { - for _, msg := range m.ExternalAdmissionHooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ExternalAdmissionHookConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalAdmissionHookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - 
} - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func (m *Initializer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -334,11 +130,11 @@ func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n1 if len(m.Initializers) > 0 { for _, msg := range m.Initializers { dAtA[i] = 0x12 @@ -372,11 +168,11 @@ func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n2 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -455,73 +251,6 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n7, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) @@ -549,68 +278,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *AdmissionHookClientConfig) Size() (n int) { - var l int - _ = l - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.URLPath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExternalAdmissionHook) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m 
*ExternalAdmissionHookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalAdmissionHooks) > 0 { - for _, e := range m.ExternalAdmissionHooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ExternalAdmissionHookConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *Initializer) Size() (n int) { var l int _ = l @@ -677,30 +344,6 @@ func (m *Rule) Size() (n int) { return n } -func (m *RuleWithOperations) Size() (n int) { - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceReference) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -714,53 +357,6 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AdmissionHookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AdmissionHookClientConfig{`, - `Service:` + strings.Replace(strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1), `&`, ``, 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URLPath:` + fmt.Sprintf("%v", this.URLPath) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "AdmissionHookClientConfig", "AdmissionHookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHookConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `ExternalAdmissionHooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExternalAdmissionHooks), "ExternalAdmissionHook", "ExternalAdmissionHook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalAdmissionHookConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalAdmissionHookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ExternalAdmissionHookConfiguration", "ExternalAdmissionHookConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func (this *Initializer) String() string 
{ if this == nil { return "nil" @@ -806,28 +402,6 @@ func (this *Rule) String() string { }, "") return s } -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -836,538 +410,6 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *AdmissionHookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AdmissionHookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AdmissionHookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
- if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URLPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.URLPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, 
RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalAdmissionHooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalAdmissionHooks = append(m.ExternalAdmissionHooks, ExternalAdmissionHook{}) - if err := m.ExternalAdmissionHooks[len(m.ExternalAdmissionHooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { 
- return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalAdmissionHookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalAdmissionHookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalAdmissionHookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ExternalAdmissionHookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *Initializer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1837,223 +879,6 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -2164,61 +989,40 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 893 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8b, 0x23, 0x45, - 0x14, 0x4e, 0x65, 0x32, 0x4c, 0x52, 0x49, 0xd8, 0xdd, 0x42, 0x97, 0x38, 0x48, 0x27, 0xf4, 0x61, - 0xc9, 0x22, 0x76, 0x3b, 0xa3, 0x2c, 0x82, 0x88, 0x4e, 0x8f, 0xbf, 0x06, 0x66, 0x77, 0xc7, 0x72, - 0x55, 0x10, 0x0f, 0x56, 0x3a, 0x2f, 0x49, 0x99, 0xfe, 0x45, 0x55, 0x75, 0x70, 0x3c, 0x88, 0x17, - 0xef, 0x82, 0x17, 0xaf, 0xde, 0xfc, 0x53, 0xe6, 0xb8, 0xc7, 0x39, 0x05, 0xa7, 0x05, 0x2f, 0x82, - 0x7f, 0xc0, 0x9c, 0xa4, 0x7f, 0xa5, 0x3b, 0x9b, 0x84, 0x9d, 0x28, 0xec, 0x2d, 0xf5, 0xbd, 0xfa, - 0xde, 0xfb, 0xde, 0x97, 0xf7, 0xaa, 0x31, 0x9d, 0xbe, 0x2d, 0x0d, 0xee, 0x9b, 0xd3, 0x70, 0x00, - 0xc2, 0x03, 0x05, 0xd2, 0x9c, 0x81, 0x37, 0xf4, 0x85, 0x99, 0x05, 0x58, 0xc0, 0x4d, 0x36, 0x74, - 0xb9, 0x94, 0xdc, 0xf7, 0x04, 0x8c, 0xb9, 0x54, 0x82, 0x29, 0xee, 0x7b, 0xe6, 0xec, 0x80, 0x39, - 0xc1, 0x84, 0x1d, 0x98, 0x63, 0xf0, 0x40, 0x30, 0x05, 0x43, 0x23, 0x10, 0xbe, 0xf2, 0xc9, 0xfd, - 0x94, 0x6a, 0xb0, 0x80, 0x1b, 0x6b, 0xa9, 0x46, 0x4e, 0xdd, 0x7f, 0x7d, 0xcc, 0xd5, 0x24, 0x1c, - 0x18, 0xb6, 0xef, 0x9a, 0x63, 0x7f, 0xec, 0x9b, 0x49, 0x86, 0x41, 0x38, 0x4a, 0x4e, 0xc9, 0x21, - 0xf9, 0x95, 0x66, 0xde, 0x7f, 0xab, 0x10, 0xe5, 0x32, 0x7b, 0xc2, 0x3d, 0x10, 0xe7, 0x66, 0x30, - 0x1d, 0xc7, 0x80, 0x34, 0x5d, 0x50, 0xcc, 0x9c, 0xad, 0xe8, 0xd9, 0x37, 0x37, 0xb1, 0x44, 0xe8, - 0x29, 0xee, 0xc2, 0x0a, 0xe1, 0xc1, 0xf3, 0x08, 0xd2, 0x9e, 0x80, 0xcb, 0x56, 0x78, 0x6f, 0x6e, - 0xe2, 0x85, 0x8a, 0x3b, 0x26, 0xf7, 0x94, 0x54, 0xe2, 0x59, 0x92, 0x7e, 0x89, 0xf0, 0x2b, 0x47, - 0xb9, 0x4b, 0x9f, 0xf8, 0xfe, 0xf4, 0xd8, 0xe1, 0xe0, 0xa9, 0x63, 0xdf, 0x1b, 0xf1, 0x31, 0x19, - 0xe1, 0x3d, 0x09, 0x62, 0xc6, 0x6d, 0xe8, 0xa0, 0x1e, 0xea, 0x37, 0x0f, 0xdf, 0x31, 0x6e, 0xec, - 0xae, 0xf1, 0x59, 0xca, 0xa4, 0x30, 0x02, 0x01, 0x9e, 0x0d, 0xd6, 0xad, 0x8b, 0x79, 0xb7, 0x12, - 0xcd, 0xbb, 0x7b, 0x79, 0x24, 0x4f, 0x4e, 0xfa, 0xb8, 0x6e, 0x33, 0x2b, 0xf4, 0x86, 0x0e, 0x74, - 0xaa, 0x3d, 0xd4, 0x6f, 0x59, 0xad, 0x68, 0xde, 0xad, 0x1f, 0x1f, 0xa5, 0x18, 0x5d, 0x44, 0xc9, - 0x7d, 0xbc, 0x17, 0x0a, 0xe7, 0x8c, 0xa9, 0x49, 0x67, 0xa7, 0x87, 0xfa, 0x8d, 0x22, 0xe9, 0xe7, - 0xf4, 0x34, 0x86, 0x69, 0x1e, 0xd7, 0xff, 0xae, 0xe2, 0x97, 0x3f, 0xfc, 0x4e, 0x81, 0xf0, 0x98, - 0xb3, 0xd4, 0x22, 0xe9, 0xe1, 0x9a, 0xc7, 0xdc, 0xb4, 0xa7, 0x86, 0xd5, 0xca, 0x32, 0xd4, 0x1e, - 0x31, 0x17, 0x68, 0x12, 0x21, 0x3f, 0xe0, 0x96, 0x5d, 0x32, 0x22, 0x11, 0xd5, 0x3c, 0xfc, 0x60, - 0x8b, 0xee, 0x37, 0x9a, 0x6a, 0xbd, 0x94, 0xd5, 0x6b, 0x95, 0x51, 0xba, 0x54, 0x8f, 0x0c, 0xf0, - 0xae, 0x08, 0x1d, 0x90, 0x9d, 0x9d, 0xde, 0x4e, 0xbf, 0x79, 0xf8, 0xee, 0x16, 0x85, 0x69, 0xe8, - 0xc0, 0x97, 0x5c, 0x4d, 0x1e, 0x07, 0x90, 0x86, 0xa4, 0xd5, 0xce, 0x2a, 0xee, 0xc6, 0x31, 0x49, - 0xd3, 0xd4, 0xe4, 0x14, 0xb7, 0x47, 0x8c, 0x3b, 0xa1, 0x80, 0x33, 0xdf, 0xe1, 0xf6, 0x79, 0xa7, - 0x96, 0xd8, 0x71, 0x2f, 0x9a, 0x77, 0xdb, 0x1f, 0x95, 0x03, 0xd7, 0xf3, 0xee, 0x9d, 0x25, 0xe0, - 0xc9, 0x79, 0x00, 0x74, 0x99, 0xac, 0xff, 0x56, 0xc5, 0xfa, 0x5a, 0xb7, 0xd3, 0x8e, 0xc2, 0x54, - 0x0b, 0xf9, 0x06, 0xd7, 0xe3, 0x45, 0x19, 0x32, 0xc5, 0xb2, 0x91, 0x7a, 0xa3, 0xd4, 0xdb, 0x62, - 0x6e, 0x8d, 0x60, 0x3a, 0x8e, 0x01, 0x69, 0xc4, 0xb7, 0x8d, 0xd9, 0x81, 0xf1, 0x78, 0xf0, 
0x2d, - 0xd8, 0xea, 0x21, 0x28, 0x66, 0x91, 0xac, 0x1d, 0x5c, 0x60, 0x74, 0x91, 0x95, 0xfc, 0x8a, 0xf0, - 0x5d, 0x58, 0x27, 0x44, 0x76, 0xaa, 0x89, 0x99, 0xef, 0x6f, 0x61, 0xe6, 0xda, 0x8e, 0x2c, 0x2d, - 0x13, 0x70, 0x77, 0x6d, 0x58, 0xd2, 0x0d, 0xf5, 0xf5, 0x6b, 0x84, 0xef, 0x3d, 0xdf, 0xa3, 0x53, - 0x2e, 0x15, 0xf9, 0x7a, 0xc5, 0x27, 0xe3, 0x66, 0x3e, 0xc5, 0xec, 0xc4, 0xa5, 0xdb, 0x99, 0xc8, - 0x7a, 0x8e, 0x94, 0x3c, 0x12, 0x78, 0x97, 0x2b, 0x70, 0x73, 0x47, 0x1e, 0xfe, 0x5f, 0x47, 0x96, - 0xf4, 0x17, 0xe3, 0x76, 0x12, 0xd7, 0xa0, 0x69, 0x29, 0xfd, 0x27, 0x84, 0x9b, 0x27, 0x1e, 0x57, - 0x9c, 0x39, 0xfc, 0x7b, 0x10, 0x37, 0x58, 0xc2, 0x27, 0xf9, 0x12, 0xa4, 0x2a, 0xcd, 0x2d, 0x97, - 0x60, 0xfd, 0xd8, 0xeb, 0xff, 0x20, 0xdc, 0x29, 0xe9, 0x78, 0xd1, 0xe3, 0x19, 0xe0, 0x16, 0x2f, - 0xaa, 0xe7, 0xbd, 0x3d, 0xd8, 0xa2, 0xb7, 0x92, 0xf8, 0xe2, 0x2d, 0x29, 0x81, 0x92, 0x2e, 0x55, - 0xd0, 0xff, 0x42, 0xf8, 0xd5, 0x4d, 0x0d, 0xbf, 0x80, 0x59, 0x9b, 0x2c, 0xcf, 0xda, 0xf1, 0x7f, - 0xeb, 0xf4, 0x26, 0x13, 0xf6, 0x0b, 0xc2, 0xb5, 0xf8, 0xaf, 0x26, 0xaf, 0xe1, 0x06, 0x0b, 0xf8, - 0xc7, 0xc2, 0x0f, 0x03, 0xd9, 0x41, 0xbd, 0x9d, 0x7e, 0xc3, 0x6a, 0x47, 0xf3, 0x6e, 0xe3, 0xe8, - 0xec, 0x24, 0x05, 0x69, 0x11, 0x27, 0x07, 0xb8, 0xc9, 0x02, 0xfe, 0x05, 0x88, 0x58, 0x47, 0xaa, - 0xb2, 0x61, 0xdd, 0x8a, 0xe6, 0xdd, 0xe6, 0xd1, 0xd9, 0x49, 0x0e, 0xd3, 0xf2, 0x9d, 0x38, 0xbf, - 0x00, 0xe9, 0x87, 0xc2, 0xce, 0x5e, 0xe8, 0x2c, 0x3f, 0xcd, 0x41, 0x5a, 0xc4, 0xf5, 0xdf, 0x11, - 0x26, 0xab, 0x6f, 0x32, 0x79, 0x0f, 0x63, 0x7f, 0x71, 0xca, 0x44, 0x76, 0x93, 0xa9, 0x59, 0xa0, - 0xd7, 0xf3, 0x6e, 0x7b, 0x71, 0x4a, 0xde, 0xdc, 0x12, 0x85, 0x7c, 0x8a, 0x6b, 0xf1, 0x40, 0x67, - 0x9f, 0xa6, 0xad, 0x97, 0x63, 0xb1, 0x70, 0xf1, 0x89, 0x26, 0xa9, 0x74, 0xc0, 0xb7, 0x9f, 0xfd, - 0x68, 0x13, 0x13, 0x37, 0xe2, 0x65, 0x94, 0x01, 0xb3, 0xf3, 0x5d, 0xbd, 0x93, 0x51, 0x1b, 0x8f, - 0xf2, 0x00, 0x2d, 0xee, 0x2c, 0xf6, 0xba, 0xba, 0x69, 0xaf, 0x2d, 0xe3, 0xe2, 0x4a, 0xab, 0x3c, - 0xbd, 0xd2, 0x2a, 0x97, 0x57, 0x5a, 0xe5, 0xc7, 0x48, 0x43, 0x17, 0x91, 0x86, 0x9e, 0x46, 0x1a, - 0xba, 0x8c, 0x34, 0xf4, 0x47, 0xa4, 0xa1, 0x9f, 0xff, 0xd4, 0x2a, 0x5f, 0xd5, 0x73, 0xbd, 0xff, - 0x06, 0x00, 0x00, 0xff, 0xff, 0x01, 0xf7, 0xd5, 0xa0, 0x26, 0x0a, 0x00, 0x00, + // 545 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x3f, + 0x18, 0x6f, 0xfe, 0xdb, 0x42, 0x9b, 0x76, 0xf9, 0xcb, 0xe0, 0xa1, 0x14, 0x99, 0x96, 0x9e, 0x2a, + 0x62, 0x62, 0x57, 0x59, 0xbc, 0xee, 0xec, 0x41, 0x0a, 0xbe, 0x2c, 0x41, 0x3c, 0x88, 0x07, 0xd3, + 0xf6, 0xd9, 0x69, 0x6c, 0x27, 0x19, 0x92, 0x4c, 0x41, 0x4f, 0x5e, 0xbc, 0x0b, 0x7e, 0xa9, 0x1e, + 0xf7, 0xb8, 0xa7, 0x62, 0x47, 0xf0, 0xe8, 0x67, 0x90, 0x99, 0xe9, 0xec, 0xcc, 0x5a, 0x8b, 0xab, + 0xb7, 0x3c, 0xbf, 0x27, 0xbf, 0xb7, 0x04, 0xb3, 0xf9, 0x63, 0x43, 0x84, 0xa2, 0xf3, 0x68, 0x0c, + 0x5a, 0x82, 0x05, 0x43, 0x97, 0x20, 0xa7, 0x4a, 0xd3, 0xed, 0x82, 0x87, 0x82, 0xf2, 0x69, 0x20, + 0x8c, 0x11, 0x4a, 0x6a, 0xf0, 0x85, 0xb1, 0x9a, 0x5b, 0xa1, 0x24, 0x5d, 0x0e, 0xf9, 0x22, 0x9c, + 0xf1, 0x21, 0xf5, 0x41, 0x82, 0xe6, 0x16, 0xa6, 0x24, 0xd4, 0xca, 0x2a, 0xe7, 0x6e, 0x46, 0x25, + 0x3c, 0x14, 0xe4, 0xb7, 0x54, 0x92, 0x53, 0x3b, 0xf7, 0x7d, 0x61, 0x67, 0xd1, 0x98, 0x4c, 0x54, + 0x40, 0x7d, 0xe5, 0x2b, 0x9a, 0x2a, 0x8c, 0xa3, 0xf3, 0x74, 0x4a, 0x87, 0xf4, 0x94, 0x29, 0x77, + 0x1e, 0x15, 0xa1, 0x02, 0x3e, 0x99, 0x09, 0x09, 0xfa, 0x3d, 0x0d, 0xe7, 0x7e, 0x02, 0x18, 0x1a, + 0x80, 0xe5, 0x74, 0xb9, 0x93, 0xa7, 0x43, 0xf7, 0xb1, 0x74, 0x24, 0xad, 0x08, 0x60, 
0x87, 0x70, + 0xfc, 0x27, 0x82, 0x99, 0xcc, 0x20, 0xe0, 0x3b, 0xbc, 0x87, 0xfb, 0x78, 0x91, 0x15, 0x0b, 0x2a, + 0xa4, 0x35, 0x56, 0xff, 0x4a, 0xea, 0x7f, 0x42, 0xb8, 0x39, 0x92, 0xc2, 0x0a, 0xbe, 0x10, 0x1f, + 0x40, 0x3b, 0x3d, 0x5c, 0x95, 0x3c, 0x80, 0x36, 0xea, 0xa1, 0x41, 0xc3, 0x6b, 0xad, 0xd6, 0xdd, + 0x4a, 0xbc, 0xee, 0x56, 0x9f, 0xf3, 0x00, 0x58, 0xba, 0x71, 0x5e, 0xe2, 0x9a, 0x8e, 0x16, 0x60, + 0xda, 0xff, 0xf5, 0x0e, 0x06, 0xcd, 0x23, 0x4a, 0x6e, 0xfc, 0xde, 0x84, 0x45, 0x0b, 0xf0, 0x0e, + 0xb7, 0x9a, 0xb5, 0x64, 0x32, 0x2c, 0x13, 0xeb, 0xff, 0x40, 0xb8, 0x5d, 0xca, 0x71, 0xaa, 0xe4, + 0xb9, 0xf0, 0xa3, 0x4c, 0xc0, 0x79, 0x8b, 0xeb, 0xc9, 0xeb, 0x4e, 0xb9, 0xe5, 0x69, 0xb0, 0xe6, + 0xd1, 0x83, 0x92, 0xeb, 0x55, 0x59, 0x12, 0xce, 0xfd, 0x04, 0x30, 0x24, 0xb9, 0x4d, 0x96, 0x43, + 0xf2, 0x62, 0xfc, 0x0e, 0x26, 0xf6, 0x19, 0x58, 0xee, 0x39, 0x5b, 0x5b, 0x5c, 0x60, 0xec, 0x4a, + 0xd5, 0x09, 0x71, 0x4b, 0x14, 0xee, 0x79, 0xb7, 0xe3, 0xbf, 0xe8, 0x56, 0x0a, 0xef, 0xdd, 0xde, + 0x7a, 0xb5, 0x4a, 0xa0, 0x61, 0xd7, 0x1c, 0xfa, 0xdf, 0x11, 0xbe, 0xb3, 0xaf, 0xf0, 0x53, 0x61, + 0xac, 0xf3, 0x66, 0xa7, 0x34, 0xb9, 0x59, 0xe9, 0x84, 0x9d, 0x56, 0xbe, 0xb5, 0x8d, 0x51, 0xcf, + 0x91, 0x52, 0xe1, 0x19, 0xae, 0x09, 0x0b, 0x41, 0xde, 0xf4, 0xf4, 0xdf, 0x9a, 0x5e, 0x4b, 0x5d, + 0xfc, 0xec, 0x28, 0x51, 0x66, 0x99, 0x41, 0xff, 0x0b, 0xc2, 0xd5, 0xe4, 0xab, 0x9d, 0x7b, 0xb8, + 0xc1, 0x43, 0xf1, 0x44, 0xab, 0x28, 0x34, 0x6d, 0xd4, 0x3b, 0x18, 0x34, 0xbc, 0xc3, 0x78, 0xdd, + 0x6d, 0x9c, 0x9c, 0x8d, 0x32, 0x90, 0x15, 0x7b, 0x67, 0x88, 0x9b, 0x3c, 0x14, 0xaf, 0x40, 0x27, + 0x39, 0xb2, 0x94, 0x0d, 0xef, 0xff, 0x78, 0xdd, 0x6d, 0x9e, 0x9c, 0x8d, 0x72, 0x98, 0x95, 0xef, + 0x24, 0xfa, 0x1a, 0x8c, 0x8a, 0xf4, 0x04, 0x4c, 0xfb, 0xa0, 0xd0, 0x67, 0x39, 0xc8, 0x8a, 0xbd, + 0x47, 0x56, 0x1b, 0xb7, 0x72, 0xb1, 0x71, 0x2b, 0x97, 0x1b, 0xb7, 0xf2, 0x31, 0x76, 0xd1, 0x2a, + 0x76, 0xd1, 0x45, 0xec, 0xa2, 0xcb, 0xd8, 0x45, 0x5f, 0x63, 0x17, 0x7d, 0xfe, 0xe6, 0x56, 0x5e, + 0xd7, 0xf3, 0xd2, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x1d, 0xfb, 0x23, 0x89, 0xaa, 0x04, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto index b431b318..82508ca6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -29,72 +29,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; -// AdmissionHookClientConfig contains the information to make a TLS -// connection with the webhook -message AdmissionHookClientConfig { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. - // Required - optional ServiceReference service = 1; - - // URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object. - optional string urlPath = 3; - - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. - // Required - optional bytes caBundle = 2; -} - -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. -message ExternalAdmissionHook { - // The name of the external admission webhook. 
- // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - optional string name = 1; - - // ClientConfig defines how to communicate with the hook. - // Required - optional AdmissionHookClientConfig clientConfig = 2; - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - repeated RuleWithOperations rules = 3; - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - optional string failurePolicy = 4; -} - -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -message ExternalAdmissionHookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated ExternalAdmissionHook externalAdmissionHooks = 2; -} - -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -message ExternalAdmissionHookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ExternalAdmissionHookConfiguration. - repeated ExternalAdmissionHookConfiguration items = 2; -} - // Initializer describes the name and the failure policy of an initializer, and // what resources it applies to. message Initializer { @@ -155,7 +89,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -163,36 +97,12 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; } -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -message RuleWithOperations { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string operations = 1; - - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. 
- optional Rule rule = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // Namespace is the namespace of the service - // Required - optional string namespace = 1; - - // Name is the name of the service - // Required - optional string name = 2; -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index e178cf74..e9a4164c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -45,8 +45,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &InitializerConfiguration{}, &InitializerConfigurationList{}, - &ExternalAdmissionHookConfiguration{}, - &ExternalAdmissionHookConfigurationList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go index 84f47b62..a245f1e8 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -104,120 +104,3 @@ type Rule struct { // Required. Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` } - -type FailurePolicyType string - -const ( - // Ignore means the initializer is removed from the initializers list of an - // object if the initializer is timed out. - Ignore FailurePolicyType = "Ignore" - // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the - // extensible admission feature is beta. - Fail FailurePolicyType = "Fail" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExternalAdmissionHookConfiguration describes the configuration of initializers. -type ExternalAdmissionHookConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // ExternalAdmissionHooks is a list of external admission webhooks and the - // affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - ExternalAdmissionHooks []ExternalAdmissionHook `json:"externalAdmissionHooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=externalAdmissionHooks"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration. -type ExternalAdmissionHookConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of ExternalAdmissionHookConfiguration. - Items []ExternalAdmissionHookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ExternalAdmissionHook describes an external admission webhook and the -// resources and operations it applies to. -type ExternalAdmissionHook struct { - // The name of the external admission webhook. 
- // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig AdmissionHookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` -} - -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` -} - -type OperationType string - -// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. -const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" -) - -// AdmissionHookClientConfig contains the information to make a TLS -// connection with the webhook -type AdmissionHookClientConfig struct { - // Service is a reference to the service for this webhook. If there is only - // one port open for the service, that port will be used. If there are multiple - // ports open, port 443 will be used if it is open, otherwise it is an error. - // Required - Service ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` - - // URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object. - URLPath string `json:"urlPath" protobuf:"bytes,3,opt,name=urlPath"` - - // CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. 
- // Required - CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // Namespace is the namespace of the service - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // Name is the name of the service - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go index 77a8a519..e2494e5d 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -27,49 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_AdmissionHookClientConfig = map[string]string{ - "": "AdmissionHookClientConfig contains the information to make a TLS connection with the webhook", - "service": "Service is a reference to the service for this webhook. If there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error. Required", - "urlPath": "URLPath is an optional field that specifies the URL path to use when posting the AdmissionReview object.", - "caBundle": "CABundle is a PEM encoded CA bundle which will be used to validate webhook's server certificate. Required", -} - -func (AdmissionHookClientConfig) SwaggerDoc() map[string]string { - return map_AdmissionHookClientConfig -} - -var map_ExternalAdmissionHook = map[string]string{ - "": "ExternalAdmissionHook describes an external admission webhook and the resources and operations it applies to.", - "name": "The name of the external admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", -} - -func (ExternalAdmissionHook) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHook -} - -var map_ExternalAdmissionHookConfiguration = map[string]string{ - "": "ExternalAdmissionHookConfiguration describes the configuration of initializers.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "externalAdmissionHooks": "ExternalAdmissionHooks is a list of external admission webhooks and the affected resources and operations.", -} - -func (ExternalAdmissionHookConfiguration) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHookConfiguration -} - -var map_ExternalAdmissionHookConfigurationList = map[string]string{ - "": "ExternalAdmissionHookConfigurationList is a list of ExternalAdmissionHookConfiguration.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ExternalAdmissionHookConfiguration.", -} - -func (ExternalAdmissionHookConfigurationList) SwaggerDoc() map[string]string { - return map_ExternalAdmissionHookConfigurationList -} - var map_Initializer = map[string]string{ "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", @@ -111,23 +68,4 @@ func (Rule) SwaggerDoc() map[string]string { return map_Rule } -var map_RuleWithOperations = map[string]string{ - "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", - "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", -} - -func (RuleWithOperations) SwaggerDoc() map[string]string { - return map_RuleWithOperations -} - -var map_ServiceReference = map[string]string{ - "": "ServiceReference holds a reference to Service.legacy.k8s.io", - "namespace": "Namespace is the namespace of the service Required", - "name": "Name is the name of the service Required", -} - -func (ServiceReference) SwaggerDoc() map[string]string { - return map_ServiceReference -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index 118fed75..850de37c 100644 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -21,187 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AdmissionHookClientConfig).DeepCopyInto(out.(*AdmissionHookClientConfig)) - return nil - }, InType: reflect.TypeOf(&AdmissionHookClientConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHook).DeepCopyInto(out.(*ExternalAdmissionHook)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHook{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfiguration).DeepCopyInto(out.(*ExternalAdmissionHookConfiguration)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExternalAdmissionHookConfigurationList).DeepCopyInto(out.(*ExternalAdmissionHookConfigurationList)) - return nil - }, InType: reflect.TypeOf(&ExternalAdmissionHookConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfiguration).DeepCopyInto(out.(*InitializerConfiguration)) - return nil - }, InType: reflect.TypeOf(&InitializerConfiguration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InitializerConfigurationList).DeepCopyInto(out.(*InitializerConfigurationList)) - return nil - }, InType: reflect.TypeOf(&InitializerConfigurationList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Rule).DeepCopyInto(out.(*Rule)) - return nil - }, InType: reflect.TypeOf(&Rule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RuleWithOperations).DeepCopyInto(out.(*RuleWithOperations)) - return nil - }, InType: reflect.TypeOf(&RuleWithOperations{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceReference).DeepCopyInto(out.(*ServiceReference)) - return nil - }, InType: reflect.TypeOf(&ServiceReference{})}, - ) -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AdmissionHookClientConfig) DeepCopyInto(out *AdmissionHookClientConfig) { - *out = *in - out.Service = in.Service - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionHookClientConfig. -func (in *AdmissionHookClientConfig) DeepCopy() *AdmissionHookClientConfig { - if in == nil { - return nil - } - out := new(AdmissionHookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalAdmissionHook) DeepCopyInto(out *ExternalAdmissionHook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - if *in == nil { - *out = nil - } else { - *out = new(FailurePolicyType) - **out = **in - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHook. -func (in *ExternalAdmissionHook) DeepCopy() *ExternalAdmissionHook { - if in == nil { - return nil - } - out := new(ExternalAdmissionHook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfiguration) DeepCopyInto(out *ExternalAdmissionHookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.ExternalAdmissionHooks != nil { - in, out := &in.ExternalAdmissionHooks, &out.ExternalAdmissionHooks - *out = make([]ExternalAdmissionHook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfiguration. -func (in *ExternalAdmissionHookConfiguration) DeepCopy() *ExternalAdmissionHookConfiguration { - if in == nil { - return nil - } - out := new(ExternalAdmissionHookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyInto(out *ExternalAdmissionHookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ExternalAdmissionHookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalAdmissionHookConfigurationList. -func (in *ExternalAdmissionHookConfigurationList) DeepCopy() *ExternalAdmissionHookConfigurationList { - if in == nil { - return nil - } - out := new(ExternalAdmissionHookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ExternalAdmissionHookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Initializer) DeepCopyInto(out *Initializer) { *out = *in @@ -323,41 +145,3 @@ func (in *Rule) DeepCopy() *Rule { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) - } - in.Rule.DeepCopyInto(&out.Rule) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { - if in == nil { - return nil - } - out := new(RuleWithOperations) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go new file mode 100644 index 00000000..afbb3d6d --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -0,0 +1,25 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// Package v1beta1 is the v1beta1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io +package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go new file mode 100644 index 00000000..392ebf99 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -0,0 +1,2150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto + + It has these top-level messages: + MutatingWebhookConfiguration + MutatingWebhookConfigurationList + Rule + RuleWithOperations + ServiceReference + ValidatingWebhookConfiguration + ValidatingWebhookConfigurationList + Webhook + WebhookClientConfig +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{0} +} + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} +} + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{6} +} + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func init() { + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), 
"k8s.io.api.admissionregistration.v1beta1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") + proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") +} +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) + n3, err := m.Rule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Path != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) + } + return i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.FailurePolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i += copy(dAtA[i:], *m.FailurePolicy) + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + if m.CABundle != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) + } + if m.URL != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *MutatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if 
m.Path != nil { + l = len(*m.Path) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Webhook) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + 
fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen 
|= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 916 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0x1b, 0x45, + 0x14, 0xf6, 0xd6, 0x8e, 0x6c, 0x8f, 0x6d, 0xd1, 0x0c, 0x20, 0x99, 0xa8, 0xda, 0xb5, 0x7c, 0x40, + 0x96, 0x50, 0x76, 0x71, 0x8a, 0x10, 0x42, 0x20, 0x94, 0x8d, 0x54, 0x88, 0x94, 0xb4, 0x66, 0x02, + 0xad, 0x84, 0x38, 0x30, 0x5e, 0xbf, 0xd8, 0x83, 0xf7, 0x97, 0x66, 0x66, 0xdd, 0xe6, 0x86, 0xc4, + 0x3f, 0x80, 0xc4, 0x1f, 0xc1, 0x5f, 0xc1, 0x3d, 0x37, 0x7a, 0x41, 0xf4, 0xb4, 0x22, 0xcb, 0x99, + 0x03, 0xd7, 0x9e, 0xd0, 0xce, 0xae, 0xbd, 0x76, 0x1c, 0xa7, 0xee, 0x85, 0x03, 0x37, 0xcf, 0xf7, + 0xde, 0xf7, 0xbd, 0xf7, 0x3d, 0xbf, 0xb7, 0xe8, 0xcb, 0xe9, 0x47, 0xc2, 0x64, 0x81, 0x35, 0x8d, + 0x86, 0xc0, 0x7d, 0x90, 0x20, 0xac, 0x19, 0xf8, 0xa3, 0x80, 0x5b, 0x79, 0x80, 0x86, 0xcc, 0xa2, + 0x23, 0x8f, 0x09, 0xc1, 0x02, 0x9f, 0xc3, 0x98, 0x09, 0xc9, 0xa9, 0x64, 0x81, 0x6f, 0xcd, 0xfa, + 0x43, 0x90, 0xb4, 0x6f, 0x8d, 0xc1, 0x07, 0x4e, 0x25, 0x8c, 0xcc, 0x90, 0x07, 0x32, 0xc0, 0xbd, + 0x8c, 0x69, 0xd2, 0x90, 0x99, 0x37, 0x32, 0xcd, 0x9c, 0xb9, 0xb7, 0x3f, 0x66, 0x72, 0x12, 0x0d, + 0x4d, 0x27, 0xf0, 0xac, 0x71, 0x30, 0x0e, 0x2c, 0x25, 0x30, 0x8c, 0xce, 0xd5, 0x4b, 0x3d, 0xd4, + 0xaf, 0x4c, 0x78, 0xaf, 0xbb, 0xd4, 0x92, 0x13, 0x70, 0xb0, 0x66, 0x6b, 0xc5, 0xf7, 0x4e, 0x8b, + 0x1c, 0x78, 0x26, 0xc1, 0x4f, 0x6b, 0x8b, 0x7d, 0x1a, 0x32, 0x01, 0x7c, 0x06, 0xdc, 0x0a, 0xa7, + 0xe3, 0x34, 0x26, 0x56, 0x13, 0x36, 0x79, 0xd9, 0xfb, 0xa0, 0x90, 0xf3, 0xa8, 0x33, 0x61, 0x3e, + 0xf0, 0x8b, 0x42, 0xc3, 0x03, 0x49, 0x6f, 0x6a, 0xc2, 0xda, 0xc4, 0xe2, 0x91, 0x2f, 0x99, 0x07, + 0x6b, 0x84, 0x0f, 0x5f, 0x45, 0x10, 0xce, 0x04, 0x3c, 0xba, 0xc6, 0xbb, 0xbf, 0x89, 0x17, 0x49, + 0xe6, 0x5a, 0xcc, 0x97, 0x42, 0xf2, 0xeb, 0xa4, 0xee, 0xef, 0x1a, 0xba, 0x77, 0x1a, 0x49, 0x2a, + 0x99, 0x3f, 0x7e, 0x02, 0xc3, 0x49, 0x10, 0x4c, 0x8f, 0x02, 0xff, 0x9c, 0x8d, 0xa3, 0xec, 0xef, + 0xc1, 0xdf, 0xa1, 0x5a, 0xea, 0x6c, 0x44, 0x25, 0x6d, 0x6b, 0x1d, 0xad, 0xd7, 0x38, 0x78, 0xdf, + 0x2c, 0xfe, 0xd3, 0x45, 0x21, 0x33, 0x9c, 0x8e, 0x53, 0x40, 0x98, 0x69, 0xb6, 0x39, 0xeb, 0x9b, + 0x8f, 0x86, 0xdf, 0x83, 0x23, 0x4f, 0x41, 0x52, 0x1b, 0x5f, 0xc6, 0x46, 0x29, 0x89, 0x0d, 0x54, + 
0x60, 0x64, 0xa1, 0x8a, 0xcf, 0x50, 0x2d, 0xaf, 0x2c, 0xda, 0x77, 0x3a, 0xe5, 0x5e, 0xe3, 0xa0, + 0x6f, 0x6e, 0xbb, 0x35, 0x66, 0xce, 0xb4, 0x2b, 0x69, 0x09, 0x52, 0x7b, 0x9a, 0x0b, 0x75, 0xff, + 0xd6, 0x50, 0xe7, 0x36, 0x5f, 0x27, 0x4c, 0x48, 0xfc, 0xed, 0x9a, 0x37, 0x73, 0x3b, 0x6f, 0x29, + 0x5b, 0x39, 0xbb, 0x9b, 0x3b, 0xab, 0xcd, 0x91, 0x25, 0x5f, 0x53, 0xb4, 0xc3, 0x24, 0x78, 0x73, + 0x53, 0x0f, 0xb6, 0x37, 0x75, 0x5b, 0xe3, 0x76, 0x2b, 0x2f, 0xb9, 0x73, 0x9c, 0x8a, 0x93, 0xac, + 0x46, 0xf7, 0x67, 0x0d, 0x55, 0x48, 0xe4, 0x02, 0x7e, 0x0f, 0xd5, 0x69, 0xc8, 0x3e, 0xe7, 0x41, + 0x14, 0x8a, 0xb6, 0xd6, 0x29, 0xf7, 0xea, 0x76, 0x2b, 0x89, 0x8d, 0xfa, 0xe1, 0xe0, 0x38, 0x03, + 0x49, 0x11, 0xc7, 0x7d, 0xd4, 0xa0, 0x21, 0x7b, 0x0c, 0x5c, 0x2d, 0xbe, 0x6a, 0xb4, 0x6e, 0xbf, + 0x91, 0xc4, 0x46, 0xe3, 0x70, 0x70, 0x3c, 0x87, 0xc9, 0x72, 0x4e, 0xaa, 0xcf, 0x41, 0x04, 0x11, + 0x77, 0x40, 0xb4, 0xcb, 0x85, 0x3e, 0x99, 0x83, 0xa4, 0x88, 0x77, 0x7f, 0xd1, 0x10, 0x4e, 0xbb, + 0x7a, 0xc2, 0xe4, 0xe4, 0x51, 0x08, 0x99, 0x03, 0x81, 0x3f, 0x43, 0x28, 0x58, 0xbc, 0xf2, 0x26, + 0x0d, 0xb5, 0x1f, 0x0b, 0xf4, 0x65, 0x6c, 0xb4, 0x16, 0xaf, 0xaf, 0x2e, 0x42, 0x20, 0x4b, 0x14, + 0x3c, 0x40, 0x15, 0x1e, 0xb9, 0xd0, 0xbe, 0xb3, 0xf6, 0xa7, 0xbd, 0x62, 0xb2, 0x69, 0x33, 0x76, + 0x33, 0x9f, 0xa0, 0x1a, 0x18, 0x51, 0x4a, 0xdd, 0x1f, 0x35, 0x74, 0xf7, 0x0c, 0xf8, 0x8c, 0x39, + 0x40, 0xe0, 0x1c, 0x38, 0xf8, 0x0e, 0x60, 0x0b, 0xd5, 0x7d, 0xea, 0x81, 0x08, 0xa9, 0x03, 0x6a, + 0x41, 0xea, 0xf6, 0x6e, 0xce, 0xad, 0x3f, 0x9c, 0x07, 0x48, 0x91, 0x83, 0x3b, 0xa8, 0x92, 0x3e, + 0x54, 0x5f, 0xf5, 0xa2, 0x4e, 0x9a, 0x4b, 0x54, 0x04, 0xdf, 0x43, 0x95, 0x90, 0xca, 0x49, 0xbb, + 0xac, 0x32, 0x6a, 0x69, 0x74, 0x40, 0xe5, 0x84, 0x28, 0xb4, 0xfb, 0x87, 0x86, 0xf4, 0xc7, 0xd4, + 0x65, 0xa3, 0xff, 0xdd, 0x3d, 0xfe, 0xa3, 0xa1, 0xee, 0xed, 0xce, 0xfe, 0x83, 0x8b, 0xf4, 0x56, + 0x2f, 0xf2, 0x8b, 0xed, 0x6d, 0xdd, 0xde, 0xfa, 0x86, 0x9b, 0xfc, 0xad, 0x8c, 0xaa, 0x79, 0xfa, + 0x62, 0x33, 0xb4, 0x8d, 0x9b, 0xf1, 0x14, 0x35, 0x1d, 0x97, 0x81, 0x2f, 0x33, 0xe9, 0x7c, 0xb7, + 0x3f, 0x7d, 0xed, 0xd1, 0x1f, 0x2d, 0x89, 0xd8, 0x6f, 0xe5, 0x85, 0x9a, 0xcb, 0x28, 0x59, 0x29, + 0x84, 0x29, 0xda, 0x49, 0x4f, 0x20, 0xbb, 0xe6, 0xc6, 0xc1, 0x27, 0xaf, 0x77, 0x4d, 0xab, 0xa7, + 0x5d, 0x4c, 0x22, 0x8d, 0x09, 0x92, 0x29, 0xe3, 0x13, 0xd4, 0x3a, 0xa7, 0xcc, 0x8d, 0x38, 0x0c, + 0x02, 0x97, 0x39, 0x17, 0xed, 0x8a, 0x1a, 0xc3, 0xbb, 0x49, 0x6c, 0xb4, 0x1e, 0x2c, 0x07, 0x5e, + 0xc6, 0xc6, 0xee, 0x0a, 0xa0, 0x4e, 0x7f, 0x95, 0x8c, 0x9f, 0xa1, 0xdd, 0xc5, 0xc9, 0x9d, 0x81, + 0x0b, 0x8e, 0x0c, 0x78, 0x7b, 0x47, 0x8d, 0xeb, 0xfe, 0x96, 0xdb, 0x42, 0x87, 0xe0, 0xce, 0xa9, + 0xf6, 0xdb, 0x49, 0x6c, 0xec, 0x3e, 0xbc, 0xae, 0x48, 0xd6, 0x8b, 0x74, 0x7f, 0xd5, 0xd0, 0x9b, + 0x37, 0x8c, 0x19, 0x53, 0x54, 0x15, 0xd9, 0xc7, 0x23, 0xdf, 0xda, 0x8f, 0xb7, 0x1f, 0xe2, 0xf5, + 0xaf, 0x8e, 0xdd, 0x48, 0x62, 0xa3, 0x3a, 0x47, 0xe7, 0xba, 0xb8, 0x87, 0x6a, 0x0e, 0xb5, 0x23, + 0x7f, 0x94, 0x7f, 0xf6, 0x9a, 0x76, 0x33, 0xdd, 0xf2, 0xa3, 0xc3, 0x0c, 0x23, 0x8b, 0x28, 0x7e, + 0x07, 0x95, 0x23, 0xee, 0xe6, 0x5f, 0x98, 0x6a, 0x12, 0x1b, 0xe5, 0xaf, 0xc9, 0x09, 0x49, 0x31, + 0x7b, 0xff, 0xf2, 0x4a, 0x2f, 0x3d, 0xbf, 0xd2, 0x4b, 0x2f, 0xae, 0xf4, 0xd2, 0x0f, 0x89, 0xae, + 0x5d, 0x26, 0xba, 0xf6, 0x3c, 0xd1, 0xb5, 0x17, 0x89, 0xae, 0xfd, 0x99, 0xe8, 0xda, 0x4f, 0x7f, + 0xe9, 0xa5, 0x6f, 0xaa, 0x79, 0x6b, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x78, 0x57, 0x76, 0x28, + 0x10, 0x0a, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto 
b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto new file mode 100644 index 00000000..513a3d16 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -0,0 +1,261 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. 
+ repeated string resources = 3; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +message Webhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go new file mode 100644 index 00000000..d126da9f --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go new file mode 100644 index 00000000..30d2750c --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -0,0 +1,281 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. 
+ // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} + +type FailurePolicyType string + +const ( + // Ignore means the initializer is removed from the initializers list of an + // object if the initializer is timed out. + Ignore FailurePolicyType = "Ignore" + // For 1.7, only "Ignore" is allowed. "Fail" will be allowed when the + // extensible admission feature is beta. + Fail FailurePolicyType = "Fail" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. 
+ Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is other cluster scoped resource, + // it is not subjected to the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. 
+const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // If there is only one port open for the service, that port will be + // used. If there are multiple ports open, port 443 will be used if it + // is open, otherwise it is an error. + // + // +optional + Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..ea8c1e37 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. 
Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_Webhook = map[string]string{ + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is other cluster scoped resource, it is not subjected to the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nIf there is only one port open for the service, that port will be used. If there are multiple ports open, port 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..fbd9ad32 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,321 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. 
+func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]Webhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + if *in == nil { + *out = nil + } else { + *out = new(FailurePolicyType) + **out = **in + } + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + if *in == nil { + *out = nil + } else { + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + if in.Service != nil { + in, out := &in.Service, &out.Service + if *in == nil { + *out = nil + } else { + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 62eb80ce..1d66c222 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index f32e878f..38e7415b 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -25,12 +25,34 @@ limitations under the License. 
k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto It has these top-level messages: + ControllerRevision + ControllerRevisionList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy */ package v1 @@ -38,6 +60,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" @@ -58,38 +82,229 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func 
(*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{20} +} + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{21} +} + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } 
+func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{27} +} func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus") proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus") proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 
0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -108,27 +323,69 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n1 + i += n4 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) + n5, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n5 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) + n6, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n6 + return i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } @@ -150,11 +407,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -189,28 +446,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n5, err := m.Selector.MarshalTo(dAtA[i:]) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 
+ i += n9 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n6, err := m.Template.MarshalTo(dAtA[i:]) + n10, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n10 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n7, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n11 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -266,6 +523,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -292,11 +561,507 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n8, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n12 + } + return i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } 
+ return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + } + return i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + 
_ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + } + return i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) + if err != 
nil { + return 0, err + } + i += n28 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } return i, nil } @@ -320,11 +1085,358 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n30 + } + return i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 + } + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 + } + return i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Partition != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + } + return i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := 
m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n37, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n38, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n39, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + if len(m.VolumeClaimTemplates) > 0 { + for _, msg := range m.VolumeClaimTemplates { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if 
m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + } + return i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.RollingUpdate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 } return i, nil } @@ -356,6 +1468,31 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *ControllerRevision) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *DaemonSet) Size() (n int) { var l int _ = l @@ -368,6 +1505,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -414,6 +1567,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + 
sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -429,6 +1588,183 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { return n } +func (m *Deployment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *RollingUpdateDaemonSet) Size() (n int) { var l int _ = l @@ -439,6 +1775,137 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { return n } +func (m *RollingUpdateDeployment) Size() (n int) { + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *StatefulSet) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) 
+ l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func sovGenerated(x uint64) (n int) { for { n++ @@ -452,6 +1919,29 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSet) String() string { if this == nil { return "nil" @@ -464,6 +1954,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -503,6 +2007,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -518,6 +2023,154 @@ func (this *DaemonSetUpdateStrategy) String() string { }, "") return s } +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *RollingUpdateDaemonSet) String() string { if this == nil { return "nil" @@ -528,6 +2181,110 @@ func (this *RollingUpdateDaemonSet) String() string { }, "") return s } +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -536,6 +2293,246 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -676,6 +2673,202 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1170,6 +3363,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1303,6 +3527,1827 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } return nil } +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 
0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1386,6 +5431,1277 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA 
[]byte) error { } return nil } +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen 
+ if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", 
wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + 
} + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx 
+ var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -1496,63 +6812,134 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 928 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x95, 0xcf, 0x73, 0xdb, 0x44, - 0x14, 0xc7, 0xad, 0x38, 0x0e, 0xce, 0xa6, 0x71, 0xc8, 0xd2, 0x49, 0x45, 0x18, 0xe4, 0x60, 0x2e, - 0x86, 0x0e, 0x12, 0x6e, 0x81, 0x61, 0xe0, 0xc0, 0x54, 0xe9, 0x4c, 0x29, 0x24, 0x0e, 0xac, 0x1b, - 0x0e, 0x0c, 0xcc, 0xb0, 0x96, 0x1e, 0xce, 0x62, 0xfd, 0x1a, 0xed, 0xca, 0x83, 0x6f, 0x9c, 0x38, - 0xf3, 0xa7, 0xc0, 0x5f, 0xd0, 0x6b, 0x8e, 0x3d, 0xf6, 0xe4, 0x21, 0xe6, 0xbf, 0xe0, 0x02, 0xb3, - 0xab, 0x8d, 0x6d, 0xd9, 0x72, 0x9a, 0x9b, 0xf7, 0xbd, 0xef, 0xf7, 0xb3, 0x6f, 0xdf, 0x3e, 0xaf, - 0xd0, 0xe7, 0xc3, 0x4f, 0xb9, 0xcd, 0x62, 0x67, 0x98, 0xf5, 0x21, 0x8d, 0x40, 0x00, 0x77, 0x46, - 0x10, 0xf9, 0x71, 0xea, 0xe8, 0x04, 0x4d, 0x98, 0x43, 0x93, 0x84, 0x3b, 0xa3, 0x8e, 0x33, 0x80, - 0x08, 0x52, 0x2a, 0xc0, 0xb7, 0x93, 0x34, 0x16, 0x31, 0xc6, 0xb9, 0xc6, 0xa6, 0x09, 0xb3, 0xa5, - 0xc6, 0x1e, 0x75, 0x0e, 0x3f, 0x18, 0x30, 0x71, 0x91, 0xf5, 0x6d, 0x2f, 0x0e, 0x9d, 0x41, 0x3c, - 0x88, 0x1d, 0x25, 0xed, 0x67, 0x3f, 0xab, 0x95, 0x5a, 0xa8, 0x5f, 0x39, 0xe2, 
0xb0, 0xb5, 0xb0, - 0x8d, 0x17, 0xa7, 0x50, 0xb2, 0xcd, 0xe1, 0x7b, 0x0b, 0x9a, 0x24, 0x0e, 0x98, 0x37, 0x76, 0x46, - 0x9d, 0x3e, 0x08, 0xba, 0x2a, 0xfd, 0x68, 0x2e, 0x0d, 0xa9, 0x77, 0xc1, 0x22, 0x48, 0xc7, 0x4e, - 0x32, 0x1c, 0xc8, 0x00, 0x77, 0x42, 0x10, 0xb4, 0x6c, 0x03, 0x67, 0x9d, 0x2b, 0xcd, 0x22, 0xc1, - 0x42, 0x58, 0x31, 0x7c, 0xf2, 0x2a, 0x03, 0xf7, 0x2e, 0x20, 0xa4, 0x2b, 0xbe, 0x87, 0xeb, 0x7c, - 0x99, 0x60, 0x81, 0xc3, 0x22, 0xc1, 0x45, 0xba, 0x6c, 0x6a, 0xfd, 0x67, 0xa0, 0xed, 0xc7, 0x14, - 0xc2, 0x38, 0xea, 0x81, 0xc0, 0x3f, 0xa1, 0xba, 0x3c, 0x86, 0x4f, 0x05, 0x35, 0x8d, 0x23, 0xa3, - 0xbd, 0xf3, 0xe0, 0x43, 0x7b, 0x7e, 0x0d, 0x33, 0xaa, 0x9d, 0x0c, 0x07, 0x32, 0xc0, 0x6d, 0xa9, - 0xb6, 0x47, 0x1d, 0xfb, 0xac, 0xff, 0x0b, 0x78, 0xe2, 0x14, 0x04, 0x75, 0xf1, 0xe5, 0xa4, 0x59, - 0x99, 0x4e, 0x9a, 0x68, 0x1e, 0x23, 0x33, 0x2a, 0x3e, 0x46, 0x9b, 0x3c, 0x01, 0xcf, 0xdc, 0x50, - 0xf4, 0x77, 0xec, 0xd5, 0x4b, 0xb6, 0x67, 0xe5, 0xf4, 0x12, 0xf0, 0xdc, 0x3b, 0x1a, 0xb7, 0x29, - 0x57, 0x44, 0x99, 0xf1, 0xd7, 0x68, 0x8b, 0x0b, 0x2a, 0x32, 0x6e, 0x56, 0x15, 0xe6, 0xdd, 0x9b, - 0x31, 0x4a, 0xea, 0x36, 0x34, 0x68, 0x2b, 0x5f, 0x13, 0x8d, 0x68, 0xfd, 0x65, 0xa0, 0xdd, 0x99, - 0xf6, 0x84, 0x71, 0x81, 0x7f, 0x58, 0xe9, 0x82, 0x7d, 0xbb, 0x2e, 0x48, 0xb7, 0xea, 0xc1, 0xeb, - 0x7a, 0xaf, 0xfa, 0x75, 0x64, 0xa1, 0x03, 0x2e, 0xaa, 0x31, 0x01, 0x21, 0x37, 0x37, 0x8e, 0xaa, - 0xed, 0x9d, 0x07, 0x6f, 0xdf, 0x58, 0xbb, 0xbb, 0xab, 0x49, 0xb5, 0xa7, 0xd2, 0x43, 0x72, 0x6b, - 0xeb, 0x79, 0x75, 0xa1, 0x66, 0xd9, 0x18, 0xfc, 0x23, 0xaa, 0x73, 0x08, 0xc0, 0x13, 0x71, 0xaa, - 0x6b, 0x7e, 0x78, 0xcb, 0x9a, 0x69, 0x1f, 0x82, 0x9e, 0xb6, 0xba, 0x77, 0x64, 0xd1, 0xd7, 0x2b, - 0x32, 0x43, 0xe2, 0x6f, 0x51, 0x5d, 0x40, 0x98, 0x04, 0x54, 0x80, 0xbe, 0xba, 0x42, 0xcf, 0xe5, - 0x9f, 0x4b, 0xc2, 0xbe, 0x89, 0xfd, 0x67, 0x5a, 0xa6, 0x2e, 0x6f, 0xd6, 0x87, 0xeb, 0x28, 0x99, - 0x61, 0xf0, 0x10, 0x35, 0xb2, 0xc4, 0x97, 0x4a, 0x21, 0x07, 0x72, 0x30, 0xd6, 0x97, 0x79, 0xff, - 0xc6, 0x86, 0x9c, 0x17, 0x2c, 0xee, 0x81, 0xde, 0xa0, 0x51, 0x8c, 0x93, 0x25, 0x34, 0x7e, 0x84, - 0xf6, 0x42, 0x16, 0x11, 0xa0, 0xfe, 0xb8, 0x07, 0x5e, 0x1c, 0xf9, 0xdc, 0xdc, 0x3c, 0x32, 0xda, - 0x35, 0xf7, 0x9e, 0x06, 0xec, 0x9d, 0x16, 0xd3, 0x64, 0x59, 0x8f, 0x4f, 0xd0, 0xdd, 0x14, 0x46, - 0x8c, 0xb3, 0x38, 0xfa, 0x92, 0x71, 0x11, 0xa7, 0xe3, 0x13, 0x16, 0x32, 0x61, 0x6e, 0x29, 0x8e, - 0x39, 0x9d, 0x34, 0xef, 0x92, 0x92, 0x3c, 0x29, 0x75, 0xb5, 0xfe, 0xac, 0xa1, 0xbd, 0xa5, 0x09, - 0xc5, 0xdf, 0xa1, 0x03, 0x2f, 0x4b, 0x53, 0x88, 0x44, 0x37, 0x0b, 0xfb, 0x90, 0xf6, 0xbc, 0x0b, - 0xf0, 0xb3, 0x00, 0x7c, 0x75, 0xa3, 0x35, 0xd7, 0xd2, 0xb5, 0x1e, 0x1c, 0x97, 0xaa, 0xc8, 0x1a, - 0x37, 0xfe, 0x0a, 0xe1, 0x48, 0x85, 0x4e, 0x19, 0xe7, 0x33, 0xe6, 0x86, 0x62, 0x1e, 0x6a, 0x26, - 0xee, 0xae, 0x28, 0x48, 0x89, 0x4b, 0xd6, 0xe8, 0x03, 0x67, 0x29, 0xf8, 0xcb, 0x35, 0x56, 0x8b, - 0x35, 0x3e, 0x2e, 0x55, 0x91, 0x35, 0x6e, 0xfc, 0x31, 0xda, 0xc9, 0x77, 0x53, 0x3d, 0xd7, 0x97, - 0xf3, 0x86, 0x86, 0xed, 0x74, 0xe7, 0x29, 0xb2, 0xa8, 0x93, 0x47, 0x8b, 0xfb, 0x1c, 0xd2, 0x11, - 0xf8, 0x4f, 0xf2, 0x97, 0x8d, 0xc5, 0x91, 0x59, 0x3b, 0x32, 0xda, 0xd5, 0xf9, 0xd1, 0xce, 0x56, - 0x14, 0xa4, 0xc4, 0x25, 0x8f, 0x96, 0x4f, 0xcd, 0xca, 0xd1, 0xb6, 0x8a, 0x47, 0x3b, 0x2f, 0x55, - 0x91, 0x35, 0x6e, 0x39, 0x7b, 0x79, 0xc9, 0x8f, 0x46, 0x94, 0x05, 0xb4, 0x1f, 0x80, 0xf9, 0x5a, - 0x71, 0xf6, 0xba, 0xc5, 0x34, 0x59, 0xd6, 0xe3, 0x27, 0x68, 0x3f, 0x0f, 0x9d, 0x47, 0x74, 0x06, - 0xa9, 0x2b, 0xc8, 0x9b, 0x1a, 0xb2, 0xdf, 0x5d, 0x16, 0x90, 0x55, 0x0f, 0xfe, 0x0c, 0x35, 0xbc, - 0x38, 
0x08, 0xd4, 0x3c, 0x1e, 0xc7, 0x59, 0x24, 0xcc, 0x6d, 0x45, 0xc1, 0xf2, 0x3f, 0x74, 0x5c, - 0xc8, 0x90, 0x25, 0x65, 0xeb, 0xb9, 0x81, 0xee, 0xad, 0xf9, 0x1f, 0xe2, 0x2f, 0xd0, 0xa6, 0x18, - 0x27, 0xa0, 0x06, 0x75, 0xdb, 0xbd, 0x7f, 0xfd, 0x66, 0x3f, 0x1b, 0x27, 0xf0, 0xef, 0xa4, 0xf9, - 0xd6, 0x1a, 0x9b, 0x4c, 0x13, 0x65, 0xc4, 0x1e, 0xda, 0x4d, 0xe5, 0x76, 0xd1, 0x20, 0x97, 0xe8, - 0x57, 0xe6, 0xfd, 0xb2, 0xc7, 0x80, 0x2c, 0x0a, 0xe7, 0x4f, 0xe5, 0xfe, 0x74, 0xd2, 0xdc, 0x2d, - 0xe4, 0x48, 0x91, 0xd9, 0xfa, 0xdd, 0x40, 0x07, 0xe5, 0x66, 0x1c, 0xa0, 0x46, 0x48, 0x7f, 0x5d, - 0x6c, 0xef, 0xab, 0xbe, 0x7f, 0xf2, 0xab, 0x6a, 0xe7, 0x5f, 0x55, 0xfb, 0x69, 0x24, 0xce, 0xd2, - 0x9e, 0x48, 0x59, 0x34, 0xc8, 0x5b, 0x79, 0x5a, 0x60, 0x91, 0x25, 0xb6, 0xdb, 0xbe, 0xbc, 0xb2, - 0x2a, 0x2f, 0xae, 0xac, 0xca, 0xcb, 0x2b, 0xab, 0xf2, 0xdb, 0xd4, 0x32, 0x2e, 0xa7, 0x96, 0xf1, - 0x62, 0x6a, 0x19, 0x2f, 0xa7, 0x96, 0xf1, 0xf7, 0xd4, 0x32, 0xfe, 0xf8, 0xc7, 0xaa, 0x7c, 0xbf, - 0x31, 0xea, 0xfc, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x89, 0x78, 0x09, 0x33, 0x43, 0x09, 0x00, 0x00, + // 2051 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0xcd, 0x66, 0x7b, 0xf0, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0x35, 0x33, 0xb5, 0xe3, 0x8e, 0xfb, 0x4b, 0xdd, 0xd5, + 0xc3, 0x8e, 0xb8, 0x20, 0x24, 0x38, 0x71, 0xe0, 0x3f, 0x41, 0x08, 0xc1, 0x0d, 0x45, 0x88, 0xcb, + 0x5e, 0x90, 0x22, 0x2e, 0xe4, 0x64, 0xb1, 0x93, 0x13, 0x42, 0x39, 0x72, 0xc9, 0x05, 0x54, 0xd5, + 0xd5, 0xdf, 0xd5, 0x9e, 0xb1, 0x37, 0xeb, 0xa0, 0x68, 0x6f, 0x9e, 0xaa, 0xf7, 0x7b, 0xfd, 0xab, + 0xaa, 0x5f, 0xd5, 0x7b, 0x5d, 0x6d, 0x70, 0xef, 0xf8, 0xdb, 0xae, 0xaa, 0x59, 0xcd, 0x63, 0xaf, + 0x4b, 0x1c, 0x93, 0x50, 0xe2, 0x36, 0x87, 0xc4, 0xec, 0x5b, 0x4e, 0x53, 0x74, 0x60, 0x5b, 0x6b, + 0x62, 0xdb, 0x76, 0x9b, 0xc3, 0xcd, 0xe6, 0x80, 0x98, 0xc4, 0xc1, 0x94, 0xf4, 0x55, 0xdb, 0xb1, + 0xa8, 0x05, 0xa1, 0x8f, 0x51, 0xb1, 0xad, 0xa9, 0x0c, 0xa3, 0x0e, 0x37, 0xaf, 0xde, 0x1e, 0x68, + 0xf4, 0xc8, 0xeb, 0xaa, 0x3d, 0xcb, 0x68, 0x0e, 0xac, 0x81, 0xd5, 0xe4, 0xd0, 0xae, 0xf7, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x3e, 0xc5, 0xd5, 0x46, 0xec, 0x31, 0x3d, 0xcb, 0x21, 0x92, 0xc7, + 0x5c, 0xbd, 0x19, 0xc3, 0xd8, 0x96, 0xae, 0xf5, 0x46, 0xcd, 0xe1, 0x66, 0x97, 0x50, 0x9c, 0x85, + 0xde, 0x8d, 0xa0, 0x06, 0xee, 0x1d, 0x69, 0x26, 0x71, 0x46, 0x4d, 0xfb, 0x78, 0xc0, 0x1a, 0xdc, + 0xa6, 0x41, 0x28, 0x96, 0x3d, 0xa0, 0x99, 0x17, 0xe5, 0x78, 0x26, 0xd5, 0x0c, 0x92, 0x09, 0x78, + 0x73, 0x52, 0x80, 0xdb, 0x3b, 0x22, 0x06, 0xce, 0xc4, 0xbd, 0x9e, 0x17, 0xe7, 0x51, 0x4d, 0x6f, + 0x6a, 0x26, 0x75, 0xa9, 0x93, 0x0e, 0x6a, 0xfc, 0x47, 0x01, 0xb0, 0x6d, 0x99, 0xd4, 0xb1, 0x74, + 0x9d, 0x38, 0x88, 0x0c, 0x35, 0x57, 0xb3, 0x4c, 0xf8, 0x53, 0x50, 0x61, 0xe3, 0xe9, 0x63, 0x8a, + 0xab, 0xca, 0x75, 0x65, 0x63, 0xe1, 0xce, 0xb7, 0xd4, 0x68, 0x3d, 0x42, 0x7a, 0xd5, 0x3e, 0x1e, + 0xb0, 0x06, 0x57, 0x65, 0x68, 0x75, 0xb8, 0xa9, 0xee, 0x75, 0x3f, 0x20, 0x3d, 0xda, 0x21, 0x14, + 0xb7, 0xe0, 0x93, 0x93, 0xfa, 0xcc, 0xf8, 0xa4, 0x0e, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x3d, 0x50, + 0xe2, 0xec, 0x05, 0xce, 0x7e, 0x3b, 0x97, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3f, 0xa6, + 0xc4, 0x64, 0xe9, 0xb5, 0x2e, 0x09, 0xea, 0xd2, 0x36, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x1a, 0xa8, + 0x38, 0x22, 0xfd, 0x6a, 0xf1, 0xba, 0xb2, 0x51, 0x6c, 0x5d, 0x16, 0xa8, 0x4a, 
0x30, 0x2c, 0x14, + 0x22, 0x1a, 0x7f, 0x55, 0xc0, 0x5a, 0x76, 0xdc, 0x3b, 0x9a, 0x4b, 0xe1, 0xfb, 0x99, 0xb1, 0xab, + 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x87, 0x0f, 0x0e, 0x5a, 0x62, 0xe3, 0x7e, 0x0f, 0x94, 0x35, + 0x4a, 0x0c, 0xb7, 0x5a, 0xb8, 0x5e, 0xdc, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0x96, 0xb9, 0x9a, 0x4d, + 0xac, 0xb5, 0x28, 0x28, 0xcb, 0xef, 0xb2, 0x60, 0xe4, 0x73, 0x34, 0xfe, 0xab, 0x80, 0xf9, 0x6d, + 0x4c, 0x0c, 0xcb, 0x3c, 0x20, 0xf4, 0x02, 0x16, 0xad, 0x0d, 0x4a, 0xae, 0x4d, 0x7a, 0x62, 0xd1, + 0xbe, 0x26, 0xcb, 0x3d, 0x4c, 0xe7, 0xc0, 0x26, 0xbd, 0x68, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, 0xe1, + 0x7b, 0x60, 0xd6, 0xa5, 0x98, 0x7a, 0x2e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, 0x43, + 0x5b, 0x4b, 0x82, 0x68, 0xd6, 0xff, 0x8d, 0x04, 0x45, 0xe3, 0x5f, 0x05, 0x00, 0x43, 0x6c, 0xdb, + 0x32, 0xfb, 0x1a, 0x65, 0xf5, 0xfb, 0x16, 0x28, 0xd1, 0x91, 0x4d, 0xf8, 0x34, 0xcc, 0xb7, 0x6e, + 0x04, 0x59, 0xdc, 0x1f, 0xd9, 0xe4, 0xb3, 0x93, 0xfa, 0x5a, 0x36, 0x82, 0xf5, 0x20, 0x1e, 0x03, + 0x77, 0xc2, 0xfc, 0x0a, 0x3c, 0xfa, 0x6e, 0xf2, 0xd1, 0x9f, 0x9d, 0xd4, 0x25, 0xe7, 0x8a, 0x1a, + 0x32, 0x25, 0x13, 0x84, 0x43, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, 0x36, 0x5d, 0xff, 0x49, 0x9a, + 0x41, 0xc4, 0xc8, 0x5f, 0x9d, 0x6e, 0x79, 0x58, 0x44, 0xeb, 0xaa, 0xc8, 0x02, 0xee, 0x64, 0xd8, + 0x90, 0xe4, 0x09, 0xf0, 0x06, 0x98, 0x75, 0x08, 0x76, 0x2d, 0xb3, 0x5a, 0xe2, 0xa3, 0x08, 0x27, + 0x10, 0xf1, 0x56, 0x24, 0x7a, 0xe1, 0x4d, 0x30, 0x67, 0x10, 0xd7, 0xc5, 0x03, 0x52, 0x2d, 0x73, + 0xe0, 0xb2, 0x00, 0xce, 0x75, 0xfc, 0x66, 0x14, 0xf4, 0x37, 0x7e, 0xaf, 0x80, 0xc5, 0x70, 0xe6, + 0x2e, 0x60, 0xab, 0xb4, 0x92, 0x5b, 0xe5, 0xe5, 0x53, 0xeb, 0x24, 0x67, 0x87, 0x7c, 0x58, 0x8c, + 0xe5, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0x54, 0x5c, 0xa2, 0x93, 0x1e, 0xb5, 0x1c, 0x91, 0xf3, 0xeb, + 0x53, 0xe6, 0x8c, 0xbb, 0x44, 0x3f, 0x10, 0xa1, 0xad, 0x4b, 0x2c, 0xe9, 0xe0, 0x17, 0x0a, 0x29, + 0xe1, 0x8f, 0x40, 0x85, 0x12, 0xc3, 0xd6, 0x31, 0x25, 0x62, 0x9b, 0x24, 0xea, 0x9b, 0x95, 0x0b, + 0x23, 0xdb, 0xb7, 0xfa, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x70, 0x1e, 0x82, 0x56, 0x14, 0xd2, 0xc0, + 0x63, 0xb0, 0xe4, 0xd9, 0x7d, 0x86, 0xa4, 0xec, 0xe8, 0x1e, 0x8c, 0x44, 0xf9, 0xdc, 0x3a, 0x75, + 0x42, 0x0e, 0x13, 0x21, 0xad, 0x35, 0xf1, 0x80, 0xa5, 0x64, 0x3b, 0x4a, 0x51, 0xc3, 0x2d, 0xb0, + 0x6c, 0x68, 0x26, 0x22, 0xb8, 0x3f, 0x3a, 0x20, 0x3d, 0xcb, 0xec, 0xbb, 0xbc, 0x80, 0xca, 0xad, + 0x75, 0x41, 0xb0, 0xdc, 0x49, 0x76, 0xa3, 0x34, 0x1e, 0xee, 0x80, 0xd5, 0xe0, 0x9c, 0xfd, 0x81, + 0xe6, 0x52, 0xcb, 0x19, 0xed, 0x68, 0x86, 0x46, 0xab, 0xb3, 0x9c, 0xa7, 0x3a, 0x3e, 0xa9, 0xaf, + 0x22, 0x49, 0x3f, 0x92, 0x46, 0x35, 0x7e, 0x33, 0x0b, 0x96, 0x53, 0xa7, 0x01, 0x7c, 0x00, 0xd6, + 0x7a, 0x9e, 0xe3, 0x10, 0x93, 0xee, 0x7a, 0x46, 0x97, 0x38, 0x07, 0xbd, 0x23, 0xd2, 0xf7, 0x74, + 0xd2, 0xe7, 0x2b, 0x5a, 0x6e, 0xd5, 0x44, 0xae, 0x6b, 0x6d, 0x29, 0x0a, 0xe5, 0x44, 0xc3, 0x1f, + 0x02, 0x68, 0xf2, 0xa6, 0x8e, 0xe6, 0xba, 0x21, 0x67, 0x81, 0x73, 0x86, 0x1b, 0x70, 0x37, 0x83, + 0x40, 0x92, 0x28, 0x96, 0x63, 0x9f, 0xb8, 0x9a, 0x43, 0xfa, 0xe9, 0x1c, 0x8b, 0xc9, 0x1c, 0xb7, + 0xa5, 0x28, 0x94, 0x13, 0x0d, 0xdf, 0x00, 0x0b, 0xfe, 0xd3, 0xf8, 0x9c, 0x8b, 0xc5, 0x59, 0x11, + 0x64, 0x0b, 0xbb, 0x51, 0x17, 0x8a, 0xe3, 0xd8, 0xd0, 0xac, 0xae, 0x4b, 0x9c, 0x21, 0xe9, 0xbf, + 0xe3, 0x7b, 0x00, 0x26, 0x94, 0x65, 0x2e, 0x94, 0xe1, 0xd0, 0xf6, 0x32, 0x08, 0x24, 0x89, 0x62, + 0x43, 0xf3, 0xab, 0x26, 0x33, 0xb4, 0xd9, 0xe4, 0xd0, 0x0e, 0xa5, 0x28, 0x94, 0x13, 0xcd, 0x6a, + 0xcf, 0x4f, 0x79, 0x6b, 0x88, 0x35, 0x1d, 0x77, 0x75, 0x52, 0x9d, 0x4b, 0xd6, 0xde, 0x6e, 0xb2, + 0x1b, 
0xa5, 0xf1, 0xf0, 0x1d, 0x70, 0xc5, 0x6f, 0x3a, 0x34, 0x71, 0x48, 0x52, 0xe1, 0x24, 0x2f, + 0x09, 0x92, 0x2b, 0xbb, 0x69, 0x00, 0xca, 0xc6, 0xc0, 0xb7, 0xc0, 0x52, 0xcf, 0xd2, 0x75, 0x5e, + 0x8f, 0x6d, 0xcb, 0x33, 0x69, 0x75, 0x9e, 0xb3, 0x40, 0xb6, 0x87, 0xda, 0x89, 0x1e, 0x94, 0x42, + 0xc2, 0x87, 0x00, 0xf4, 0x02, 0x39, 0x70, 0xab, 0x20, 0x5f, 0xe8, 0xb3, 0x3a, 0x14, 0x09, 0x70, + 0xd8, 0xe4, 0xa2, 0x18, 0x5b, 0xe3, 0x43, 0x05, 0xac, 0xe7, 0xec, 0x71, 0xf8, 0xbd, 0x84, 0xea, + 0xdd, 0x4a, 0xa9, 0xde, 0xb5, 0x9c, 0xb0, 0x98, 0xf4, 0xf5, 0xc0, 0x22, 0xf3, 0x1d, 0x9a, 0x39, + 0xf0, 0x21, 0xe2, 0x04, 0x7b, 0x55, 0x96, 0x3b, 0x8a, 0x03, 0xa3, 0x63, 0xf8, 0xca, 0xf8, 0xa4, + 0xbe, 0x98, 0xe8, 0x43, 0x49, 0xce, 0xc6, 0x2f, 0x0b, 0x00, 0x6c, 0x13, 0x5b, 0xb7, 0x46, 0x06, + 0x31, 0x2f, 0xc2, 0xb5, 0x6c, 0x27, 0x5c, 0x4b, 0x43, 0xba, 0x10, 0x61, 0x3e, 0xb9, 0xb6, 0x65, + 0x27, 0x65, 0x5b, 0xbe, 0x31, 0x81, 0xe7, 0x74, 0xdf, 0xf2, 0x8f, 0x22, 0x58, 0x89, 0xc0, 0x91, + 0x71, 0xb9, 0x97, 0x58, 0xc2, 0x57, 0x52, 0x4b, 0xb8, 0x2e, 0x09, 0x79, 0x6e, 0xce, 0xe5, 0xf3, + 0x77, 0x10, 0xf0, 0x03, 0xb0, 0xc4, 0xac, 0x8a, 0x5f, 0x08, 0xdc, 0x08, 0xcd, 0x9e, 0xd9, 0x08, + 0x85, 0x42, 0xb6, 0x93, 0x60, 0x42, 0x29, 0xe6, 0x1c, 0xe3, 0x35, 0xf7, 0xbc, 0x8d, 0x57, 0xe3, + 0x0f, 0x0a, 0x58, 0x8a, 0x96, 0xe9, 0x02, 0x6c, 0x52, 0x3b, 0x69, 0x93, 0x6a, 0xa7, 0xd7, 0x65, + 0x8e, 0x4f, 0xfa, 0x7b, 0x29, 0x9e, 0x35, 0x37, 0x4a, 0x1b, 0xec, 0x85, 0xca, 0xd6, 0xb5, 0x1e, + 0x76, 0x85, 0xac, 0x5e, 0xf2, 0x5f, 0xa6, 0xfc, 0x36, 0x14, 0xf6, 0x26, 0x2c, 0x55, 0xe1, 0xf9, + 0x5a, 0xaa, 0xe2, 0xe7, 0x63, 0xa9, 0xee, 0x83, 0x8a, 0x1b, 0x98, 0xa9, 0x12, 0xa7, 0xbc, 0x31, + 0x69, 0x3b, 0x0b, 0x1f, 0x15, 0xb2, 0x86, 0x0e, 0x2a, 0x64, 0x92, 0x79, 0xa7, 0xf2, 0x17, 0xe9, + 0x9d, 0xd8, 0x16, 0xb6, 0xb1, 0xe7, 0x92, 0x3e, 0xaf, 0xfb, 0x4a, 0xb4, 0x85, 0xf7, 0x79, 0x2b, + 0x12, 0xbd, 0xf0, 0x10, 0xac, 0xdb, 0x8e, 0x35, 0x70, 0x88, 0xeb, 0x6e, 0x13, 0xdc, 0xd7, 0x35, + 0x93, 0x04, 0x03, 0xf0, 0x55, 0xef, 0xda, 0xf8, 0xa4, 0xbe, 0xbe, 0x2f, 0x87, 0xa0, 0xbc, 0xd8, + 0xc6, 0x9f, 0x4b, 0xe0, 0x72, 0xfa, 0x44, 0xcc, 0x31, 0x22, 0xca, 0xb9, 0x8c, 0xc8, 0x6b, 0xb1, + 0x12, 0xf5, 0x5d, 0x5a, 0xec, 0x9d, 0x3f, 0x53, 0xa6, 0x5b, 0x60, 0x59, 0x18, 0x8f, 0xa0, 0x53, + 0x58, 0xb1, 0x70, 0x79, 0x0e, 0x93, 0xdd, 0x28, 0x8d, 0x67, 0xf6, 0x22, 0x72, 0x0d, 0x01, 0x49, + 0x29, 0x69, 0x2f, 0xb6, 0xd2, 0x00, 0x94, 0x8d, 0x81, 0x1d, 0xb0, 0xe2, 0x99, 0x59, 0x2a, 0xbf, + 0x5c, 0xae, 0x09, 0xaa, 0x95, 0xc3, 0x2c, 0x04, 0xc9, 0xe2, 0xe0, 0x8f, 0x13, 0x8e, 0x63, 0x96, + 0x1f, 0x04, 0xaf, 0x9c, 0x5e, 0xd1, 0x53, 0x5b, 0x0e, 0x78, 0x0f, 0x2c, 0x3a, 0xdc, 0x50, 0x06, + 0x59, 0xfa, 0xa6, 0xec, 0x2b, 0x22, 0x6c, 0x11, 0xc5, 0x3b, 0x51, 0x12, 0x2b, 0xf1, 0x51, 0x95, + 0x69, 0x7d, 0x54, 0xe3, 0x4f, 0x0a, 0x80, 0xd9, 0x2d, 0x38, 0xf1, 0xe5, 0x3e, 0x13, 0x11, 0x93, + 0xc8, 0xbe, 0xdc, 0xe1, 0xdc, 0x9a, 0xec, 0x70, 0xa2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, + 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x51, 0x3e, 0xcf, 0x66, 0x71, 0x62, 0x3c, 0xa7, 0x5b, 0x9c, + 0x7f, 0x17, 0xc0, 0x4a, 0x04, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0x5e, 0x5c, 0xce, 0x4c, 0xbe, 0x9c, + 0x61, 0xb6, 0x23, 0x9a, 0xba, 0xff, 0x13, 0xdb, 0x11, 0x25, 0x94, 0x63, 0x3b, 0x7e, 0x57, 0x88, + 0x67, 0xfd, 0xa5, 0xb7, 0x1d, 0xcf, 0x7e, 0xb9, 0xd2, 0xf8, 0x4b, 0x11, 0x5c, 0x4e, 0x6f, 0xc1, + 0x84, 0x0e, 0x2a, 0x13, 0x75, 0x70, 0x1f, 0xac, 0x3e, 0xf2, 0x74, 0x7d, 0xc4, 0xa7, 0x21, 0x26, + 0x86, 0xbe, 0x82, 0x7e, 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0x1c, 0x4d, 0x2f, + 0x9e, 0x4b, 0xd3, 0x33, 0x6a, 
0x53, 0x3a, 0x83, 0xda, 0x48, 0xf5, 0xb9, 0x7c, 0x0e, 0x7d, 0x9e, + 0x5a, 0x50, 0x25, 0xc7, 0xd5, 0xc4, 0x77, 0xf8, 0x5f, 0x2b, 0x60, 0x4d, 0xfe, 0xfa, 0x0c, 0x75, + 0xb0, 0x64, 0xe0, 0xc7, 0xf1, 0xcb, 0x8b, 0x49, 0x82, 0xe1, 0x51, 0x4d, 0x57, 0xfd, 0xaf, 0x3b, + 0xea, 0xbb, 0x26, 0xdd, 0x73, 0x0e, 0xa8, 0xa3, 0x99, 0x03, 0x5f, 0x60, 0x3b, 0x09, 0x2e, 0x94, + 0xe2, 0x6e, 0x7c, 0xa2, 0x80, 0xf5, 0x1c, 0x95, 0xbb, 0xd8, 0x4c, 0xe0, 0x43, 0x50, 0x31, 0xf0, + 0xe3, 0x03, 0xcf, 0x19, 0x04, 0x92, 0x7c, 0xf6, 0xe7, 0xf0, 0x8d, 0xdc, 0x11, 0x2c, 0x28, 0xe4, + 0x6b, 0xec, 0x81, 0xeb, 0x89, 0x41, 0xb2, 0x4d, 0x43, 0x1e, 0x79, 0x3a, 0xdf, 0x3f, 0xc2, 0x53, + 0xdc, 0x02, 0xf3, 0x36, 0x76, 0xa8, 0x16, 0x9a, 0xd1, 0x72, 0x6b, 0x71, 0x7c, 0x52, 0x9f, 0xdf, + 0x0f, 0x1a, 0x51, 0xd4, 0xdf, 0xf8, 0x55, 0x01, 0x2c, 0xc4, 0x48, 0x2e, 0x40, 0xdf, 0xdf, 0x4e, + 0xe8, 0xbb, 0xf4, 0x8b, 0x49, 0x7c, 0x54, 0x79, 0x02, 0xdf, 0x49, 0x09, 0xfc, 0x37, 0x27, 0x11, + 0x9d, 0xae, 0xf0, 0x9f, 0x16, 0xc0, 0x6a, 0x0c, 0x1d, 0x49, 0xfc, 0x77, 0x12, 0x12, 0xbf, 0x91, + 0x92, 0xf8, 0xaa, 0x2c, 0xe6, 0x85, 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x36, 0x77, + 0x17, 0x20, 0xf2, 0xdb, 0x49, 0x91, 0xaf, 0x4f, 0xa8, 0x97, 0x1c, 0x95, 0x7f, 0x52, 0x4e, 0xe4, + 0xfd, 0xa5, 0x97, 0xf9, 0x9f, 0x83, 0xd5, 0xa1, 0xa5, 0x7b, 0x06, 0x69, 0xeb, 0x58, 0x33, 0x02, + 0x00, 0x53, 0x32, 0x36, 0x89, 0x37, 0xa5, 0xf4, 0xc4, 0x71, 0x35, 0x97, 0x12, 0x93, 0x3e, 0x88, + 0x22, 0x23, 0x2d, 0x7e, 0x20, 0xa1, 0x43, 0xd2, 0x87, 0xc0, 0x37, 0xc0, 0x02, 0xd3, 0x54, 0xad, + 0x47, 0x76, 0xb1, 0x11, 0xd4, 0x54, 0xf8, 0x7d, 0xe0, 0x20, 0xea, 0x42, 0x71, 0x1c, 0x3c, 0x02, + 0x2b, 0xb6, 0xd5, 0xef, 0x60, 0x13, 0x0f, 0x08, 0x3b, 0xff, 0xf7, 0xf9, 0xff, 0x42, 0xf0, 0x7b, + 0x87, 0xf9, 0xd6, 0x9b, 0xc1, 0x0b, 0xe9, 0x7e, 0x16, 0xc2, 0x3c, 0xbb, 0xa4, 0x99, 0xef, 0x67, + 0x19, 0x25, 0x34, 0x32, 0x9f, 0xb3, 0xe6, 0x32, 0xff, 0x03, 0x20, 0x2b, 0xae, 0x73, 0x7e, 0xd0, + 0xca, 0xbb, 0x51, 0xa9, 0x9c, 0xeb, 0x6b, 0xd4, 0xa7, 0x25, 0x70, 0x25, 0x73, 0x40, 0x7e, 0x81, + 0x77, 0x1a, 0x19, 0xb7, 0x54, 0x3c, 0x83, 0x5b, 0xda, 0x02, 0xcb, 0xe2, 0x43, 0x58, 0xca, 0x6c, + 0x85, 0x76, 0xb4, 0x9d, 0xec, 0x46, 0x69, 0xbc, 0xec, 0x4e, 0xa5, 0x7c, 0xc6, 0x3b, 0x95, 0x78, + 0x16, 0xe2, 0xff, 0x37, 0xfc, 0xaa, 0xcb, 0x66, 0x21, 0xfe, 0x8d, 0x23, 0x8d, 0x87, 0xdf, 0x0d, + 0x4a, 0x2a, 0x64, 0x98, 0xe3, 0x0c, 0xa9, 0x1a, 0x09, 0x09, 0x52, 0xe8, 0x67, 0xfa, 0xd8, 0xf3, + 0xbe, 0xe4, 0x63, 0xcf, 0xc6, 0x84, 0x52, 0x9e, 0xde, 0x2a, 0xfe, 0x4d, 0x01, 0x2f, 0xe5, 0xee, + 0x01, 0xb8, 0x95, 0xd0, 0xd9, 0xdb, 0x29, 0x9d, 0x7d, 0x39, 0x37, 0x30, 0x26, 0xb6, 0x86, 0xfc, + 0x42, 0xe4, 0xee, 0xc4, 0x0b, 0x11, 0x89, 0x8b, 0x9a, 0x7c, 0x33, 0xd2, 0xda, 0x78, 0xf2, 0xb4, + 0x36, 0xf3, 0xd1, 0xd3, 0xda, 0xcc, 0xc7, 0x4f, 0x6b, 0x33, 0xbf, 0x18, 0xd7, 0x94, 0x27, 0xe3, + 0x9a, 0xf2, 0xd1, 0xb8, 0xa6, 0x7c, 0x3c, 0xae, 0x29, 0xff, 0x1c, 0xd7, 0x94, 0xdf, 0x7e, 0x52, + 0x9b, 0x79, 0x58, 0x18, 0x6e, 0xfe, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x70, 0x5f, 0xbf, 0x58, 0x3d, + 0x26, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 8fa54afd..c0499d32 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -31,6 +31,38 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// ControllerRevision implements an immutable snapshot of state data. 
Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. @@ -52,6 +84,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -67,9 +120,8 @@ message DaemonSetList { message DaemonSetSpec { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. @@ -143,6 +195,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. 
@@ -160,6 +218,262 @@ message DaemonSetUpdateStrategy { optional RollingUpdateDaemonSet rollingUpdate = 2; } +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. 
+ // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. 
+ // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + // Spec to control the desired behavior of daemon set rolling update. message RollingUpdateDaemonSet { // The maximum number of DaemonSet pods that can be unavailable during the @@ -179,3 +493,209 @@ message RollingUpdateDaemonSet { // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; } + +// Spec to control the desired behavior of rolling update. 
+message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + optional int32 partition = 1; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. 
+message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. 
+ // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go index 586b53ef..02710104 100644 --- a/vendor/k8s.io/api/apps/v1/register.go +++ b/vendor/k8s.io/api/apps/v1/register.go @@ -41,11 +41,19 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &StatefulSet{}, + &StatefulSetList{}, &DaemonSet{}, &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index dc589e65..f32300fe 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -19,14 +19,440 @@ package v1 import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) const ( ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. 
+ // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy,specification version indicated + // by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. 
Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). 
+ CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. 
+ // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. 
+ // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is atmost 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. 
+ // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. type DaemonSetUpdateStrategy struct { // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. @@ -76,10 +502,9 @@ type RollingUpdateDaemonSet struct { type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -152,6 +577,33 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. 
// +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient @@ -199,3 +651,169 @@ type DaemonSetList struct { // A list of daemon sets. Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. 
+type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. 
+type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 4cc96dc4..76305393 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -27,6 +27,27 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_ControllerRevision = map[string]string{ + "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. 
Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -38,6 +59,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -50,7 +84,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", @@ -72,6 +106,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -88,6 +123,143 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { return map_DaemonSetUpdateStrategy } +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + var map_RollingUpdateDaemonSet = map[string]string{ "": "Spec to control the desired behavior of daemon set rolling update.", "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", @@ -97,4 +269,97 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { return map_RollingUpdateDaemonSet } +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. 
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 72d654a5..c41db298 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -21,48 +21,72 @@ limitations under the License. package v1 import ( + core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -94,6 +118,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -176,6 +217,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -214,6 +262,336 @@ func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. 
+func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { *out = *in @@ -238,3 +616,251 @@ func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { in.DeepCopyInto(out) return out } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + if *in == nil { + *out = nil + } else { + *out = new(intstr.IntOrString) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + if *in == nil { + *out = nil + } else { + *out = new(meta_v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]core_v1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. 
+func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + if *in == nil { + *out = nil + } else { + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 158f0000..6047ed50 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index 6d5e1f7a..baee7a97 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -41,6 +41,7 @@ limitations under the License. 
ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus @@ -144,22 +145,26 @@ func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } + func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func init() { @@ -179,6 +184,7 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") @@ -840,6 +846,48 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *StatefulSetList) Marshal() (dAtA []byte, err error) 
{ size := m.Size() dAtA = make([]byte, size) @@ -858,11 +906,11 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n24, err := m.ListMeta.MarshalTo(dAtA[i:]) + n25, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n24 + i += n25 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -902,20 +950,20 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n25, err := m.Selector.MarshalTo(dAtA[i:]) + n26, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n26, err := m.Template.MarshalTo(dAtA[i:]) + n27, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 @@ -939,11 +987,11 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n27, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 if m.RevisionHistoryLimit != nil { dAtA[i] = 0x40 i++ @@ -997,6 +1045,18 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1023,11 +1083,11 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n28, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 } return i, nil } @@ -1286,6 +1346,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1347,6 +1423,12 @@ func (m *StatefulSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1591,6 +1673,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", 
this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetList) String() string { if this == nil { return "nil" @@ -1632,6 +1728,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4017,6 +4114,202 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -4603,6 +4896,37 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4846,119 +5170,122 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1820 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4b, 0x6f, 0x1b, 0xc9, - 0x11, 0xd6, 0x50, 0xa4, 0x44, 0xb6, 0x56, 0x94, 0xd5, 0x52, 0x24, 0xae, 0x36, 0xa1, 0x04, 0x66, - 0xb1, 0x2b, 0xef, 0xae, 0x86, 0x6b, 0xd9, 0x31, 0xfc, 0x00, 0x8c, 0x88, 0x94, 0x63, 0xcb, 0x90, - 0x22, 0xb9, 0x29, 0x39, 0x88, 0x93, 0x00, 0x6e, 0x0e, 0xdb, 0xd4, 0x58, 0xf3, 0xc2, 0x4c, 0x0f, - 0x63, 0x22, 0x97, 0xdc, 0x13, 0xc0, 0x39, 0xe7, 0x57, 0xe4, 0x18, 0x24, 0xb7, 0x9c, 0x74, 0x09, - 0x60, 0xe4, 0x12, 0x9f, 0x84, 0x98, 0xbe, 0xe6, 0x9a, 0x8b, 0x81, 0x00, 0x41, 0xf7, 0xf4, 0xbc, - 0x67, 0x24, 0x2a, 0x40, 0x74, 0xc8, 0x8d, 0xd3, 0x55, 0xf5, 0x55, 0x75, 0x77, 0x75, 0xd5, 0x57, - 0x04, 0x3f, 0x3c, 0xb9, 0xe3, 0xc8, 0xaa, 0xd9, 0x3c, 0x71, 0xbb, 0xc4, 0x36, 0x08, 0x25, 0x4e, - 0x73, 0x40, 0x8c, 0x9e, 0x69, 0x37, 0x85, 0x00, 0x5b, 0x6a, 0x13, 0x5b, 0x96, 0xd3, 0x1c, 0xdc, - 0xe8, 0x12, 0x8a, 0x6f, 0x34, 0xfb, 0xc4, 0x20, 0x36, 0xa6, 0xa4, 0x27, 0x5b, 0xb6, 0x49, 0x4d, - 0xb8, 0xec, 0x29, 0xca, 0xd8, 0x52, 0x65, 0xa6, 
0x28, 0x0b, 0xc5, 0x95, 0x8d, 0xbe, 0x4a, 0x8f, - 0xdd, 0xae, 0xac, 0x98, 0x7a, 0xb3, 0x6f, 0xf6, 0xcd, 0x26, 0xd7, 0xef, 0xba, 0x2f, 0xf9, 0x17, - 0xff, 0xe0, 0xbf, 0x3c, 0x9c, 0x95, 0x46, 0xc4, 0xa1, 0x62, 0xda, 0xa4, 0x39, 0x48, 0xf9, 0x5a, - 0xb9, 0x1e, 0xd1, 0xb1, 0x4c, 0x4d, 0x55, 0x86, 0x79, 0x61, 0xad, 0xdc, 0x0a, 0x55, 0x75, 0xac, - 0x1c, 0xab, 0x06, 0xb1, 0x87, 0x4d, 0xeb, 0xa4, 0xcf, 0x16, 0x9c, 0xa6, 0x4e, 0x28, 0xce, 0x72, - 0xd0, 0xcc, 0xb3, 0xb2, 0x5d, 0x83, 0xaa, 0x3a, 0x49, 0x19, 0xdc, 0xbe, 0xc8, 0xc0, 0x51, 0x8e, - 0x89, 0x8e, 0x53, 0x76, 0x37, 0xf3, 0xec, 0x5c, 0xaa, 0x6a, 0x4d, 0xd5, 0xa0, 0x0e, 0xb5, 0x93, - 0x46, 0x8d, 0x7f, 0x49, 0x00, 0xb6, 0x4d, 0x83, 0xda, 0xa6, 0xa6, 0x11, 0x1b, 0x91, 0x81, 0xea, - 0xa8, 0xa6, 0x01, 0x5f, 0x80, 0x32, 0xdb, 0x4f, 0x0f, 0x53, 0x5c, 0x93, 0xd6, 0xa4, 0xf5, 0x99, - 0xcd, 0x6f, 0xe5, 0xf0, 0x52, 0x02, 0x78, 0xd9, 0x3a, 0xe9, 0xb3, 0x05, 0x47, 0x66, 0xda, 0xf2, - 0xe0, 0x86, 0xbc, 0xdf, 0x7d, 0x45, 0x14, 0xba, 0x47, 0x28, 0x6e, 0xc1, 0xd3, 0xb3, 0xd5, 0x89, - 0xd1, 0xd9, 0x2a, 0x08, 0xd7, 0x50, 0x80, 0x0a, 0xf7, 0x41, 0x91, 0xa3, 0x17, 0x38, 0xfa, 0x46, - 0x2e, 0xba, 0xd8, 0xb4, 0x8c, 0xf0, 0x2f, 0x1f, 0xbe, 0xa6, 0xc4, 0x60, 0xe1, 0xb5, 0x3e, 0x11, - 0xd0, 0xc5, 0x6d, 0x4c, 0x31, 0xe2, 0x40, 0xf0, 0x1b, 0x50, 0xb6, 0x45, 0xf8, 0xb5, 0xc9, 0x35, - 0x69, 0x7d, 0xb2, 0x75, 0x4d, 0x68, 0x95, 0xfd, 0x6d, 0xa1, 0x40, 0xa3, 0x71, 0x2a, 0x81, 0xa5, - 0xf4, 0xbe, 0x77, 0x55, 0x87, 0xc2, 0x9f, 0xa7, 0xf6, 0x2e, 0x8f, 0xb7, 0x77, 0x66, 0xcd, 0x77, - 0x1e, 0x38, 0xf6, 0x57, 0x22, 0xfb, 0x3e, 0x00, 0x25, 0x95, 0x12, 0xdd, 0xa9, 0x15, 0xd6, 0x26, - 0xd7, 0x67, 0x36, 0xbf, 0x96, 0x73, 0x72, 0x5d, 0x4e, 0x47, 0xd7, 0x9a, 0x15, 0xb8, 0xa5, 0x1d, - 0x86, 0x80, 0x3c, 0xa0, 0xc6, 0x6f, 0x0b, 0x00, 0x6c, 0x13, 0x4b, 0x33, 0x87, 0x3a, 0x31, 0xe8, - 0x15, 0x5c, 0xdd, 0x0e, 0x28, 0x3a, 0x16, 0x51, 0xc4, 0xd5, 0x7d, 0x99, 0xbb, 0x83, 0x30, 0xa8, - 0x8e, 0x45, 0x94, 0xf0, 0xd2, 0xd8, 0x17, 0xe2, 0x10, 0xf0, 0x29, 0x98, 0x72, 0x28, 0xa6, 0xae, - 0xc3, 0xaf, 0x6c, 0x66, 0xf3, 0xfa, 0x38, 0x60, 0xdc, 0xa0, 0x55, 0x15, 0x70, 0x53, 0xde, 0x37, - 0x12, 0x40, 0x8d, 0xbf, 0x4f, 0x82, 0x85, 0x50, 0xb9, 0x6d, 0x1a, 0x3d, 0x95, 0xb2, 0x94, 0xbe, - 0x0f, 0x8a, 0x74, 0x68, 0x11, 0x7e, 0x26, 0x95, 0xd6, 0x97, 0x7e, 0x30, 0x87, 0x43, 0x8b, 0x7c, - 0x3c, 0x5b, 0x5d, 0xce, 0x30, 0x61, 0x22, 0xc4, 0x8d, 0xe0, 0x6e, 0x10, 0x67, 0x81, 0x9b, 0xdf, - 0x8a, 0x3b, 0xff, 0x78, 0xb6, 0x9a, 0x51, 0x6b, 0xe4, 0x00, 0x29, 0x1e, 0x22, 0xfc, 0x02, 0x4c, - 0xd9, 0x04, 0x3b, 0xa6, 0x51, 0x2b, 0x72, 0xb4, 0x60, 0x2b, 0x88, 0xaf, 0x22, 0x21, 0x85, 0xd7, - 0xc1, 0xb4, 0x4e, 0x1c, 0x07, 0xf7, 0x49, 0xad, 0xc4, 0x15, 0xe7, 0x84, 0xe2, 0xf4, 0x9e, 0xb7, - 0x8c, 0x7c, 0x39, 0x7c, 0x05, 0xaa, 0x1a, 0x76, 0xe8, 0x91, 0xd5, 0xc3, 0x94, 0x1c, 0xaa, 0x3a, - 0xa9, 0x4d, 0xf1, 0x03, 0xfd, 0x6a, 0xbc, 0xbb, 0x67, 0x16, 0xad, 0x25, 0x81, 0x5e, 0xdd, 0x8d, - 0x21, 0xa1, 0x04, 0x32, 0x1c, 0x00, 0xc8, 0x56, 0x0e, 0x6d, 0x6c, 0x38, 0xde, 0x41, 0x31, 0x7f, - 0xd3, 0x97, 0xf6, 0xb7, 0x22, 0xfc, 0xc1, 0xdd, 0x14, 0x1a, 0xca, 0xf0, 0xd0, 0xf8, 0xa3, 0x04, - 0xaa, 0xe1, 0x35, 0x5d, 0xc1, 0x5b, 0x7d, 0x1c, 0x7f, 0xab, 0xdf, 0x1f, 0x23, 0x39, 0x73, 0xde, - 0xe8, 0x3f, 0x0b, 0x00, 0x86, 0x4a, 0xc8, 0xd4, 0xb4, 0x2e, 0x56, 0x4e, 0xe0, 0x1a, 0x28, 0x1a, - 0x58, 0xf7, 0x73, 0x32, 0x78, 0x20, 0x3f, 0xc6, 0x3a, 0x41, 0x5c, 0x02, 0xdf, 0x48, 0x00, 0xba, - 0xfc, 0xe8, 0x7b, 0x5b, 0x86, 0x61, 0x52, 0xcc, 0x4e, 0xc3, 0x0f, 0xa8, 0x3d, 0x46, 0x40, 0xbe, - 0x2f, 0xf9, 0x28, 0x85, 0xf2, 0xd0, 0xa0, 0xf6, 0x30, 0xbc, 0x85, 0xb4, 
0x02, 0xca, 0x70, 0x0d, - 0x7f, 0x06, 0x80, 0x2d, 0x30, 0x0f, 0x4d, 0xf1, 0x6c, 0xf3, 0x6b, 0x80, 0xef, 0xbe, 0x6d, 0x1a, - 0x2f, 0xd5, 0x7e, 0x58, 0x58, 0x50, 0x00, 0x81, 0x22, 0x70, 0x2b, 0x0f, 0xc1, 0x72, 0x4e, 0x9c, - 0xf0, 0x1a, 0x98, 0x3c, 0x21, 0x43, 0xef, 0xa8, 0x10, 0xfb, 0x09, 0x17, 0x41, 0x69, 0x80, 0x35, - 0x97, 0x78, 0x6f, 0x12, 0x79, 0x1f, 0xf7, 0x0a, 0x77, 0xa4, 0xc6, 0x1f, 0x4a, 0xd1, 0x4c, 0x61, - 0xf5, 0x06, 0xae, 0xb3, 0xf6, 0x60, 0x69, 0xaa, 0x82, 0x1d, 0x8e, 0x51, 0x6a, 0x7d, 0xe2, 0xb5, - 0x06, 0x6f, 0x0d, 0x05, 0x52, 0xf8, 0x0b, 0x50, 0x76, 0x88, 0x46, 0x14, 0x6a, 0xda, 0xa2, 0xc4, - 0xdd, 0x1c, 0x33, 0xa7, 0x70, 0x97, 0x68, 0x1d, 0x61, 0xea, 0xc1, 0xfb, 0x5f, 0x28, 0x80, 0x84, - 0x4f, 0x41, 0x99, 0x12, 0xdd, 0xd2, 0x30, 0x25, 0xe2, 0xf4, 0x62, 0x79, 0xc5, 0x6a, 0x07, 0x03, - 0x3b, 0x30, 0x7b, 0x87, 0x42, 0x8d, 0x57, 0xcf, 0x20, 0x4f, 0xfd, 0x55, 0x14, 0xc0, 0xc0, 0x9f, - 0x82, 0xb2, 0x43, 0x59, 0x57, 0xef, 0x0f, 0x79, 0x45, 0x39, 0xaf, 0xad, 0x44, 0xeb, 0xa8, 0x67, - 0x12, 0x42, 0xfb, 0x2b, 0x28, 0x80, 0x83, 0x5b, 0x60, 0x4e, 0x57, 0x0d, 0x44, 0x70, 0x6f, 0xd8, - 0x21, 0x8a, 0x69, 0xf4, 0x1c, 0x5e, 0x8a, 0x4a, 0xad, 0x65, 0x61, 0x34, 0xb7, 0x17, 0x17, 0xa3, - 0xa4, 0x3e, 0xdc, 0x05, 0x8b, 0x7e, 0xdb, 0x7d, 0xac, 0x3a, 0xd4, 0xb4, 0x87, 0xbb, 0xaa, 0xae, - 0x52, 0x5e, 0xa0, 0x4a, 0xad, 0xda, 0xe8, 0x6c, 0x75, 0x11, 0x65, 0xc8, 0x51, 0xa6, 0x15, 0xab, - 0x9d, 0x16, 0x76, 0x1d, 0xd2, 0xe3, 0x05, 0xa7, 0x1c, 0xd6, 0xce, 0x03, 0xbe, 0x8a, 0x84, 0x14, - 0xfe, 0x24, 0x96, 0xa6, 0xe5, 0xcb, 0xa5, 0x69, 0x35, 0x3f, 0x45, 0xe1, 0x11, 0x58, 0xb6, 0x6c, - 0xb3, 0x6f, 0x13, 0xc7, 0xd9, 0x26, 0xb8, 0xa7, 0xa9, 0x06, 0xf1, 0x4f, 0xa6, 0xc2, 0x77, 0xf4, - 0xd9, 0xe8, 0x6c, 0x75, 0xf9, 0x20, 0x5b, 0x05, 0xe5, 0xd9, 0x36, 0xfe, 0x52, 0x04, 0xd7, 0x92, - 0x3d, 0x0e, 0x3e, 0x01, 0xd0, 0xec, 0x3a, 0xc4, 0x1e, 0x90, 0xde, 0x23, 0x8f, 0xb8, 0x31, 0x76, - 0x23, 0x71, 0x76, 0x13, 0xbc, 0xdb, 0xfd, 0x94, 0x06, 0xca, 0xb0, 0xf2, 0xf8, 0x91, 0x78, 0x00, - 0x05, 0x1e, 0x68, 0x84, 0x1f, 0xa5, 0x1e, 0xc1, 0x16, 0x98, 0x13, 0x6f, 0xdf, 0x17, 0xf2, 0x64, - 0x8d, 0xdc, 0xfb, 0x51, 0x5c, 0x8c, 0x92, 0xfa, 0xf0, 0x11, 0x98, 0xc7, 0x03, 0xac, 0x6a, 0xb8, - 0xab, 0x91, 0x00, 0xa4, 0xc8, 0x41, 0x3e, 0x15, 0x20, 0xf3, 0x5b, 0x49, 0x05, 0x94, 0xb6, 0x81, - 0x7b, 0x60, 0xc1, 0x35, 0xd2, 0x50, 0x5e, 0x1e, 0x7e, 0x26, 0xa0, 0x16, 0x8e, 0xd2, 0x2a, 0x28, - 0xcb, 0x0e, 0xbe, 0x00, 0x40, 0xf1, 0x1b, 0xb3, 0x53, 0x9b, 0xe2, 0x95, 0xf4, 0x9b, 0x31, 0xde, - 0x4b, 0xd0, 0xcd, 0xc3, 0x2a, 0x16, 0x2c, 0x39, 0x28, 0x82, 0x09, 0xef, 0x83, 0x59, 0x9b, 0xbd, - 0x80, 0x20, 0xd4, 0x69, 0x1e, 0xea, 0x77, 0x84, 0xd9, 0x2c, 0x8a, 0x0a, 0x51, 0x5c, 0x17, 0xde, - 0x03, 0x55, 0xc5, 0xd4, 0x34, 0x9e, 0xf9, 0x6d, 0xd3, 0x35, 0x28, 0x4f, 0xde, 0x52, 0x0b, 0xb2, - 0xce, 0xdc, 0x8e, 0x49, 0x50, 0x42, 0xb3, 0xf1, 0x67, 0x29, 0xda, 0x66, 0xfc, 0xe7, 0x0c, 0xef, - 0xc5, 0xa8, 0xcf, 0x17, 0x09, 0xea, 0xb3, 0x94, 0xb6, 0x88, 0x30, 0x1f, 0x15, 0xcc, 0xb2, 0xe4, - 0x57, 0x8d, 0xbe, 0x77, 0xe1, 0xa2, 0x24, 0x7e, 0x7b, 0xee, 0x53, 0x0a, 0xb4, 0x23, 0x8d, 0x71, - 0x9e, 0xef, 0x3c, 0x2a, 0x44, 0x71, 0xe4, 0xc6, 0x03, 0x50, 0x8d, 0xbf, 0xc3, 0x18, 0xa7, 0x97, - 0x2e, 0xe4, 0xf4, 0x1f, 0x24, 0xb0, 0x9c, 0xe3, 0x1d, 0x6a, 0xa0, 0xaa, 0xe3, 0xd7, 0x91, 0x1c, - 0xb9, 0x90, 0x1b, 0xb3, 0xa9, 0x49, 0xf6, 0xa6, 0x26, 0x79, 0xc7, 0xa0, 0xfb, 0x76, 0x87, 0xda, - 0xaa, 0xd1, 0xf7, 0xee, 0x61, 0x2f, 0x86, 0x85, 0x12, 0xd8, 0xf0, 0x39, 0x28, 0xeb, 0xf8, 0x75, - 0xc7, 0xb5, 0xfb, 0x59, 0xe7, 0x35, 0x9e, 0x1f, 0xde, 0x3f, 0xf6, 0x04, 0x0a, 0x0a, 0xf0, 0x1a, - 
0xfb, 0x60, 0x2d, 0xb6, 0x49, 0x56, 0x2a, 0xc8, 0x4b, 0x57, 0xeb, 0x90, 0xf0, 0xc2, 0xbf, 0x06, - 0x15, 0x0b, 0xdb, 0x54, 0x0d, 0xca, 0x45, 0xa9, 0x35, 0x3b, 0x3a, 0x5b, 0xad, 0x1c, 0xf8, 0x8b, - 0x28, 0x94, 0x37, 0xfe, 0x2d, 0x81, 0x52, 0x47, 0xc1, 0x1a, 0xb9, 0x82, 0xd1, 0x61, 0x3b, 0x36, - 0x3a, 0x34, 0x72, 0x93, 0x88, 0xc7, 0x93, 0x3b, 0x35, 0xec, 0x26, 0xa6, 0x86, 0xcf, 0x2f, 0xc0, - 0x39, 0x7f, 0x60, 0xb8, 0x0b, 0x2a, 0x81, 0xbb, 0x58, 0x95, 0x94, 0x2e, 0xaa, 0x92, 0x8d, 0xdf, - 0x17, 0xc0, 0x4c, 0xc4, 0xc5, 0xe5, 0xac, 0xd9, 0x71, 0x47, 0x88, 0x06, 0x2b, 0x43, 0x9b, 0xe3, - 0x6c, 0x44, 0xf6, 0x49, 0x85, 0xc7, 0xdf, 0xc2, 0xee, 0x9d, 0xe6, 0x1a, 0x0f, 0x40, 0x95, 0x62, - 0xbb, 0x4f, 0xa8, 0x2f, 0xe3, 0x07, 0x56, 0x09, 0x99, 0xfe, 0x61, 0x4c, 0x8a, 0x12, 0xda, 0x2b, - 0xf7, 0xc1, 0x6c, 0xcc, 0xd9, 0xa5, 0x48, 0xd8, 0x1b, 0x76, 0x38, 0x61, 0x72, 0x5e, 0x41, 0x76, - 0x3d, 0x89, 0x65, 0xd7, 0x7a, 0xfe, 0x61, 0x46, 0x9e, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 0x8e, 0x7d, - 0x35, 0x16, 0xda, 0xf9, 0x99, 0xf6, 0x27, 0x09, 0xcc, 0x45, 0xb4, 0xaf, 0x60, 0x82, 0xd9, 0x89, - 0x4f, 0x30, 0x9f, 0x8f, 0xb3, 0x89, 0x9c, 0x11, 0xe6, 0xaf, 0xa5, 0x58, 0xf0, 0xff, 0xf7, 0xa4, - 0xfa, 0x57, 0x60, 0x71, 0x60, 0x6a, 0xae, 0x4e, 0xda, 0x1a, 0x56, 0x75, 0x5f, 0x81, 0x31, 0x98, - 0xc9, 0xe4, 0x1f, 0x15, 0x01, 0x3c, 0xb1, 0x1d, 0xd5, 0xa1, 0xc4, 0xa0, 0xcf, 0x42, 0xcb, 0xd6, - 0x77, 0x85, 0x93, 0xc5, 0x67, 0x19, 0x70, 0x28, 0xd3, 0x09, 0xfc, 0x01, 0x98, 0x61, 0x04, 0x4e, - 0x55, 0x08, 0x9b, 0x05, 0xc5, 0xf4, 0xbf, 0x20, 0x80, 0x66, 0x3a, 0xa1, 0x08, 0x45, 0xf5, 0xe0, - 0x31, 0x58, 0xb0, 0xcc, 0xde, 0x1e, 0x36, 0x70, 0x9f, 0xb0, 0xb6, 0x77, 0xc0, 0xff, 0xd0, 0xe4, - 0x4c, 0xbb, 0xd2, 0xba, 0xed, 0x33, 0xa5, 0x83, 0xb4, 0xca, 0x47, 0x46, 0x59, 0xd3, 0xcb, 0x9c, - 0x07, 0x64, 0x41, 0x42, 0x1b, 0x54, 0x5d, 0xd1, 0x7e, 0xc4, 0xe0, 0xe1, 0xcd, 0xff, 0x9b, 0xe3, - 0x64, 0xd8, 0x51, 0xcc, 0x32, 0xac, 0x46, 0xf1, 0x75, 0x94, 0xf0, 0x90, 0x3b, 0x48, 0x94, 0xff, - 0x9b, 0x41, 0xa2, 0xf1, 0x9b, 0x22, 0x98, 0x4f, 0x3d, 0x5d, 0xf8, 0xa3, 0x73, 0x18, 0xf7, 0xd2, - 0xff, 0x8c, 0x6d, 0xa7, 0x08, 0xe3, 0xe4, 0x25, 0x08, 0xe3, 0x16, 0x98, 0x53, 0x5c, 0xdb, 0x66, - 0xb3, 0x7e, 0x9c, 0x65, 0x07, 0x54, 0xbd, 0x1d, 0x17, 0xa3, 0xa4, 0x7e, 0x16, 0xdb, 0x2f, 0x5d, - 0x92, 0xed, 0x47, 0xa3, 0x10, 0x8c, 0xcd, 0x4b, 0xbb, 0x74, 0x14, 0x82, 0xb8, 0x25, 0xf5, 0x59, - 0xb7, 0xf2, 0x50, 0x03, 0x84, 0xe9, 0x78, 0xb7, 0x3a, 0x8a, 0x49, 0x51, 0x42, 0x3b, 0x83, 0x39, - 0x57, 0xc6, 0x66, 0xce, 0x7f, 0x93, 0xc0, 0xa7, 0xb9, 0x19, 0x0a, 0xb7, 0x62, 0x04, 0x7a, 0x23, - 0x41, 0xa0, 0xbf, 0x97, 0x6b, 0x18, 0xe1, 0xd1, 0x76, 0x36, 0x8f, 0xbe, 0x3b, 0x1e, 0x8f, 0xce, - 0x20, 0x79, 0x17, 0x13, 0xea, 0xd6, 0xc6, 0xe9, 0xfb, 0xfa, 0xc4, 0xdb, 0xf7, 0xf5, 0x89, 0x77, - 0xef, 0xeb, 0x13, 0xbf, 0x1e, 0xd5, 0xa5, 0xd3, 0x51, 0x5d, 0x7a, 0x3b, 0xaa, 0x4b, 0xef, 0x46, - 0x75, 0xe9, 0x1f, 0xa3, 0xba, 0xf4, 0xbb, 0x0f, 0xf5, 0x89, 0xe7, 0xd3, 0xc2, 0xe3, 0x7f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0x3a, 0x2e, 0x29, 0xcd, 0xb9, 0x19, 0x00, 0x00, + // 1871 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x52, 0xa4, 0x44, 0x3d, 0x45, 0x94, 0x3d, 0x52, 0x2d, 0x46, 0x69, 0x29, 0x61, 0x1b, + 0x24, 0x72, 0x12, 0x2d, 0x63, 0x25, 0x0d, 0x12, 0xbb, 0x08, 0x2a, 0xca, 0x6e, 0xe2, 0x40, 0xaa, + 0x94, 0xa1, 0x94, 0xa2, 0x69, 0x0b, 0x64, 0xb8, 0x1c, 0xd3, 0x1b, 0xed, 0x3f, 0xec, 0x0e, 0x59, + 0x13, 0xbd, 0xf4, 0x03, 0x14, 0x48, 0xcf, 0xfd, 0x14, 0x3d, 0x16, 0xed, 0xad, 0x27, 0x5f, 0x0a, + 
0x04, 0xbd, 0x34, 0x27, 0xa1, 0xa6, 0xaf, 0x6d, 0x6f, 0xbd, 0x18, 0x28, 0x50, 0xcc, 0xec, 0xec, + 0xff, 0x5d, 0x89, 0x2a, 0x20, 0x1d, 0x72, 0xe3, 0xce, 0x7b, 0xef, 0xf7, 0xde, 0xcc, 0xbc, 0xf7, + 0xe6, 0xfd, 0x08, 0x3f, 0x3a, 0x7d, 0xdf, 0xd7, 0x0c, 0xa7, 0x7d, 0x3a, 0xec, 0x51, 0xcf, 0xa6, + 0x8c, 0xfa, 0xed, 0x11, 0xb5, 0xfb, 0x8e, 0xd7, 0x96, 0x02, 0xe2, 0x1a, 0x6d, 0xe2, 0xba, 0x7e, + 0x7b, 0x74, 0xa7, 0x47, 0x19, 0xb9, 0xd3, 0x1e, 0x50, 0x9b, 0x7a, 0x84, 0xd1, 0xbe, 0xe6, 0x7a, + 0x0e, 0x73, 0xd0, 0x5a, 0xa0, 0xa8, 0x11, 0xd7, 0xd0, 0xb8, 0xa2, 0x26, 0x15, 0xd7, 0xb7, 0x07, + 0x06, 0x7b, 0x3c, 0xec, 0x69, 0xba, 0x63, 0xb5, 0x07, 0xce, 0xc0, 0x69, 0x0b, 0xfd, 0xde, 0xf0, + 0x91, 0xf8, 0x12, 0x1f, 0xe2, 0x57, 0x80, 0xb3, 0xae, 0x26, 0x1c, 0xea, 0x8e, 0x47, 0xdb, 0xa3, + 0x9c, 0xaf, 0xf5, 0xdb, 0x09, 0x1d, 0xd7, 0x31, 0x0d, 0x7d, 0x5c, 0x16, 0xd6, 0xfa, 0xbb, 0xb1, + 0xaa, 0x45, 0xf4, 0xc7, 0x86, 0x4d, 0xbd, 0x71, 0xdb, 0x3d, 0x1d, 0xf0, 0x05, 0xbf, 0x6d, 0x51, + 0x46, 0x8a, 0x1c, 0xb4, 0xcb, 0xac, 0xbc, 0xa1, 0xcd, 0x0c, 0x8b, 0xe6, 0x0c, 0xde, 0xbb, 0xc8, + 0xc0, 0xd7, 0x1f, 0x53, 0x8b, 0xe4, 0xec, 0xde, 0x29, 0xb3, 0x1b, 0x32, 0xc3, 0x6c, 0x1b, 0x36, + 0xf3, 0x99, 0x97, 0x35, 0x52, 0xff, 0xa3, 0x00, 0xda, 0x73, 0x6c, 0xe6, 0x39, 0xa6, 0x49, 0x3d, + 0x4c, 0x47, 0x86, 0x6f, 0x38, 0x36, 0xfa, 0x02, 0xea, 0x7c, 0x3f, 0x7d, 0xc2, 0x48, 0x53, 0xd9, + 0x54, 0xb6, 0x16, 0x77, 0xde, 0xd6, 0xe2, 0x4b, 0x89, 0xe0, 0x35, 0xf7, 0x74, 0xc0, 0x17, 0x7c, + 0x8d, 0x6b, 0x6b, 0xa3, 0x3b, 0xda, 0x61, 0xef, 0x4b, 0xaa, 0xb3, 0x03, 0xca, 0x48, 0x07, 0x3d, + 0x3d, 0xdb, 0x98, 0x99, 0x9c, 0x6d, 0x40, 0xbc, 0x86, 0x23, 0x54, 0x74, 0x08, 0x55, 0x81, 0x5e, + 0x11, 0xe8, 0xdb, 0xa5, 0xe8, 0x72, 0xd3, 0x1a, 0x26, 0xbf, 0x7a, 0xf0, 0x84, 0x51, 0x9b, 0x87, + 0xd7, 0x79, 0x49, 0x42, 0x57, 0xef, 0x13, 0x46, 0xb0, 0x00, 0x42, 0x6f, 0x41, 0xdd, 0x93, 0xe1, + 0x37, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xce, 0x0d, 0xa9, 0x55, 0x0f, 0xb7, 0x85, 0x23, 0x0d, 0xf5, + 0xa9, 0x02, 0xb7, 0xf2, 0xfb, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0xdb, 0xbb, 0x36, 0xdd, 0xde, + 0xb9, 0xb5, 0xd8, 0x79, 0xe4, 0x38, 0x5c, 0x49, 0xec, 0xfb, 0x08, 0x6a, 0x06, 0xa3, 0x96, 0xdf, + 0xac, 0x6c, 0xce, 0x6e, 0x2d, 0xee, 0xbc, 0xa9, 0x95, 0xe4, 0xba, 0x96, 0x8f, 0xae, 0xb3, 0x24, + 0x71, 0x6b, 0x0f, 0x39, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0x56, 0x00, 0xee, 0x53, 0xd7, 0x74, 0xc6, + 0x16, 0xb5, 0xd9, 0x35, 0x5c, 0xdd, 0x43, 0xa8, 0xfa, 0x2e, 0xd5, 0xe5, 0xd5, 0xbd, 0x5e, 0xba, + 0x83, 0x38, 0xa8, 0xae, 0x4b, 0xf5, 0xf8, 0xd2, 0xf8, 0x17, 0x16, 0x10, 0xe8, 0x53, 0x98, 0xf3, + 0x19, 0x61, 0x43, 0x5f, 0x5c, 0xd9, 0xe2, 0xce, 0xed, 0x69, 0xc0, 0x84, 0x41, 0xa7, 0x21, 0xe1, + 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xd4, 0xbf, 0xcf, 0xc2, 0x4a, 0xac, 0xbc, 0xe7, 0xd8, 0x7d, 0x83, + 0xf1, 0x94, 0xbe, 0x07, 0x55, 0x36, 0x76, 0xa9, 0x38, 0x93, 0x85, 0xce, 0xeb, 0x61, 0x30, 0xc7, + 0x63, 0x97, 0xbe, 0x38, 0xdb, 0x58, 0x2b, 0x30, 0xe1, 0x22, 0x2c, 0x8c, 0xd0, 0x7e, 0x14, 0x67, + 0x45, 0x98, 0xbf, 0x9b, 0x76, 0xfe, 0xe2, 0x6c, 0xa3, 0xa0, 0xd7, 0x68, 0x11, 0x52, 0x3a, 0x44, + 0xf4, 0x1a, 0xcc, 0x79, 0x94, 0xf8, 0x8e, 0xdd, 0xac, 0x0a, 0xb4, 0x68, 0x2b, 0x58, 0xac, 0x62, + 0x29, 0x45, 0xb7, 0x61, 0xde, 0xa2, 0xbe, 0x4f, 0x06, 0xb4, 0x59, 0x13, 0x8a, 0xcb, 0x52, 0x71, + 0xfe, 0x20, 0x58, 0xc6, 0xa1, 0x1c, 0x7d, 0x09, 0x0d, 0x93, 0xf8, 0xec, 0xc4, 0xed, 0x13, 0x46, + 0x8f, 0x0d, 0x8b, 0x36, 0xe7, 0xc4, 0x81, 0xbe, 0x31, 0xdd, 0xdd, 0x73, 0x8b, 0xce, 0x2d, 0x89, + 0xde, 0xd8, 0x4f, 0x21, 0xe1, 0x0c, 0x32, 0x1a, 0x01, 0xe2, 0x2b, 0xc7, 0x1e, 0xb1, 0xfd, 0xe0, + 0xa0, 0xb8, 0xbf, 0xf9, 
0x4b, 0xfb, 0x5b, 0x97, 0xfe, 0xd0, 0x7e, 0x0e, 0x0d, 0x17, 0x78, 0x50, + 0xff, 0xa8, 0x40, 0x23, 0xbe, 0xa6, 0x6b, 0xa8, 0xd5, 0x8f, 0xd3, 0xb5, 0xfa, 0xfd, 0x29, 0x92, + 0xb3, 0xa4, 0x46, 0xff, 0x59, 0x01, 0x14, 0x2b, 0x61, 0xc7, 0x34, 0x7b, 0x44, 0x3f, 0x45, 0x9b, + 0x50, 0xb5, 0x89, 0x15, 0xe6, 0x64, 0x54, 0x20, 0x3f, 0x21, 0x16, 0xc5, 0x42, 0x82, 0xbe, 0x52, + 0x00, 0x0d, 0xc5, 0xd1, 0xf7, 0x77, 0x6d, 0xdb, 0x61, 0x84, 0x9f, 0x46, 0x18, 0xd0, 0xde, 0x14, + 0x01, 0x85, 0xbe, 0xb4, 0x93, 0x1c, 0xca, 0x03, 0x9b, 0x79, 0xe3, 0xf8, 0x16, 0xf2, 0x0a, 0xb8, + 0xc0, 0x35, 0xfa, 0x39, 0x80, 0x27, 0x31, 0x8f, 0x1d, 0x59, 0xb6, 0xe5, 0x3d, 0x20, 0x74, 0xbf, + 0xe7, 0xd8, 0x8f, 0x8c, 0x41, 0xdc, 0x58, 0x70, 0x04, 0x81, 0x13, 0x70, 0xeb, 0x0f, 0x60, 0xad, + 0x24, 0x4e, 0x74, 0x03, 0x66, 0x4f, 0xe9, 0x38, 0x38, 0x2a, 0xcc, 0x7f, 0xa2, 0x55, 0xa8, 0x8d, + 0x88, 0x39, 0xa4, 0x41, 0x4d, 0xe2, 0xe0, 0xe3, 0x6e, 0xe5, 0x7d, 0x45, 0xfd, 0x43, 0x2d, 0x99, + 0x29, 0xbc, 0xdf, 0xa0, 0x2d, 0xfe, 0x3c, 0xb8, 0xa6, 0xa1, 0x13, 0x5f, 0x60, 0xd4, 0x3a, 0x2f, + 0x05, 0x4f, 0x43, 0xb0, 0x86, 0x23, 0x29, 0xfa, 0x25, 0xd4, 0x7d, 0x6a, 0x52, 0x9d, 0x39, 0x9e, + 0x6c, 0x71, 0xef, 0x4c, 0x99, 0x53, 0xa4, 0x47, 0xcd, 0xae, 0x34, 0x0d, 0xe0, 0xc3, 0x2f, 0x1c, + 0x41, 0xa2, 0x4f, 0xa1, 0xce, 0xa8, 0xe5, 0x9a, 0x84, 0x51, 0x79, 0x7a, 0xa9, 0xbc, 0xe2, 0xbd, + 0x83, 0x83, 0x1d, 0x39, 0xfd, 0x63, 0xa9, 0x26, 0xba, 0x67, 0x94, 0xa7, 0xe1, 0x2a, 0x8e, 0x60, + 0xd0, 0xcf, 0xa0, 0xee, 0x33, 0xfe, 0xaa, 0x0f, 0xc6, 0xa2, 0xa3, 0x9c, 0xf7, 0xac, 0x24, 0xfb, + 0x68, 0x60, 0x12, 0x43, 0x87, 0x2b, 0x38, 0x82, 0x43, 0xbb, 0xb0, 0x6c, 0x19, 0x36, 0xa6, 0xa4, + 0x3f, 0xee, 0x52, 0xdd, 0xb1, 0xfb, 0xbe, 0x68, 0x45, 0xb5, 0xce, 0x9a, 0x34, 0x5a, 0x3e, 0x48, + 0x8b, 0x71, 0x56, 0x1f, 0xed, 0xc3, 0x6a, 0xf8, 0xec, 0x7e, 0x6c, 0xf8, 0xcc, 0xf1, 0xc6, 0xfb, + 0x86, 0x65, 0x30, 0xd1, 0xa0, 0x6a, 0x9d, 0xe6, 0xe4, 0x6c, 0x63, 0x15, 0x17, 0xc8, 0x71, 0xa1, + 0x15, 0xef, 0x9d, 0x2e, 0x19, 0xfa, 0xb4, 0x2f, 0x1a, 0x4e, 0x3d, 0xee, 0x9d, 0x47, 0x62, 0x15, + 0x4b, 0x29, 0xfa, 0x69, 0x2a, 0x4d, 0xeb, 0x97, 0x4b, 0xd3, 0x46, 0x79, 0x8a, 0xa2, 0x13, 0x58, + 0x73, 0x3d, 0x67, 0xe0, 0x51, 0xdf, 0xbf, 0x4f, 0x49, 0xdf, 0x34, 0x6c, 0x1a, 0x9e, 0xcc, 0x82, + 0xd8, 0xd1, 0x2b, 0x93, 0xb3, 0x8d, 0xb5, 0xa3, 0x62, 0x15, 0x5c, 0x66, 0xab, 0xfe, 0xa5, 0x0a, + 0x37, 0xb2, 0x6f, 0x1c, 0xfa, 0x04, 0x90, 0xd3, 0xf3, 0xa9, 0x37, 0xa2, 0xfd, 0x8f, 0x82, 0xc1, + 0x8d, 0x4f, 0x37, 0x8a, 0x98, 0x6e, 0xa2, 0xba, 0x3d, 0xcc, 0x69, 0xe0, 0x02, 0xab, 0x60, 0x3e, + 0x92, 0x05, 0x50, 0x11, 0x81, 0x26, 0xe6, 0xa3, 0x5c, 0x11, 0xec, 0xc2, 0xb2, 0xac, 0xfd, 0x50, + 0x28, 0x92, 0x35, 0x71, 0xef, 0x27, 0x69, 0x31, 0xce, 0xea, 0xa3, 0x8f, 0xe0, 0x26, 0x19, 0x11, + 0xc3, 0x24, 0x3d, 0x93, 0x46, 0x20, 0x55, 0x01, 0xf2, 0xb2, 0x04, 0xb9, 0xb9, 0x9b, 0x55, 0xc0, + 0x79, 0x1b, 0x74, 0x00, 0x2b, 0x43, 0x3b, 0x0f, 0x15, 0xe4, 0xe1, 0x2b, 0x12, 0x6a, 0xe5, 0x24, + 0xaf, 0x82, 0x8b, 0xec, 0xd0, 0x17, 0x00, 0x7a, 0xf8, 0x30, 0xfb, 0xcd, 0x39, 0xd1, 0x49, 0xdf, + 0x9a, 0xa2, 0x5e, 0xa2, 0xd7, 0x3c, 0xee, 0x62, 0xd1, 0x92, 0x8f, 0x13, 0x98, 0xe8, 0x1e, 0x2c, + 0x79, 0xbc, 0x02, 0xa2, 0x50, 0xe7, 0x45, 0xa8, 0xdf, 0x91, 0x66, 0x4b, 0x38, 0x29, 0xc4, 0x69, + 0x5d, 0x74, 0x17, 0x1a, 0xba, 0x63, 0x9a, 0x22, 0xf3, 0xf7, 0x9c, 0xa1, 0xcd, 0x44, 0xf2, 0xd6, + 0x3a, 0x88, 0xbf, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, 0xcf, 0x4c, 0x58, + 0xce, 0xe8, 0x6e, 0x6a, 0xf4, 0x79, 0x2d, 0x33, 0xfa, 0xdc, 0xca, 0x5b, 0x24, 0x26, 0x1f, 0x03, + 0x96, 0x78, 0xf2, 0x1b, 0xf6, 0x20, 0xb8, 0x70, 
0xd9, 0x12, 0xdf, 0x3e, 0xb7, 0x94, 0x22, 0xed, + 0xc4, 0xc3, 0x78, 0x53, 0xec, 0x3c, 0x29, 0xc4, 0x69, 0x64, 0xf5, 0x43, 0x68, 0xa4, 0xeb, 0x30, + 0x35, 0xd3, 0x2b, 0x17, 0xce, 0xf4, 0xcf, 0x15, 0x58, 0x2b, 0xf1, 0x8e, 0x4c, 0x68, 0x58, 0xe4, + 0x49, 0x22, 0x47, 0x2e, 0x9c, 0x8d, 0x39, 0x6b, 0xd2, 0x02, 0xd6, 0xa4, 0x3d, 0xb4, 0xd9, 0xa1, + 0xd7, 0x65, 0x9e, 0x61, 0x0f, 0x82, 0x7b, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, 0xfa, 0x1c, 0xea, + 0x16, 0x79, 0xd2, 0x1d, 0x7a, 0x83, 0xa2, 0xf3, 0x9a, 0xce, 0x8f, 0x78, 0x3f, 0x0e, 0x24, 0x0a, + 0x8e, 0xf0, 0xd4, 0x43, 0xd8, 0x4c, 0x6d, 0x92, 0xb7, 0x0a, 0xfa, 0x68, 0x68, 0x76, 0x69, 0x7c, + 0xe1, 0x6f, 0xc2, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xed, 0xa2, 0xd6, 0x59, 0x9a, 0x9c, 0x6d, 0x2c, + 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0xd6, 0xd5, 0x89, 0x49, 0xaf, 0x81, 0x3a, + 0xdc, 0x4f, 0x51, 0x07, 0xb5, 0x34, 0x89, 0x44, 0x3c, 0xa5, 0xac, 0x61, 0x3f, 0xc3, 0x1a, 0x5e, + 0xbd, 0x00, 0xe7, 0x7c, 0xc2, 0xf0, 0x01, 0x2c, 0x44, 0xee, 0x52, 0x5d, 0x52, 0xb9, 0xa8, 0x4b, + 0xaa, 0xbf, 0xaf, 0xc0, 0x62, 0xc2, 0xc5, 0xe5, 0xac, 0xf9, 0x71, 0x27, 0x06, 0x0d, 0xde, 0x86, + 0x76, 0xa6, 0xd9, 0x88, 0x16, 0x0e, 0x15, 0xc1, 0xfc, 0x16, 0xbf, 0xde, 0xf9, 0x59, 0xe3, 0x43, + 0x68, 0x30, 0xe2, 0x0d, 0x28, 0x0b, 0x65, 0xe2, 0xc0, 0x16, 0xe2, 0x49, 0xff, 0x38, 0x25, 0xc5, + 0x19, 0xed, 0xf5, 0x7b, 0xb0, 0x94, 0x72, 0x76, 0xa9, 0x21, 0xec, 0x2b, 0x7e, 0x38, 0x71, 0x72, + 0x5e, 0x43, 0x76, 0x7d, 0x92, 0xca, 0xae, 0xad, 0xf2, 0xc3, 0x4c, 0x94, 0x4c, 0x59, 0x8e, 0xe1, + 0x4c, 0x8e, 0xbd, 0x31, 0x15, 0xda, 0xf9, 0x99, 0xf6, 0xaf, 0x0a, 0xac, 0x26, 0xb4, 0x63, 0x6e, + 0xfa, 0xc3, 0x54, 0x83, 0xde, 0xca, 0x34, 0xe8, 0x66, 0x91, 0xcd, 0x95, 0x91, 0xd3, 0x62, 0x76, + 0x37, 0x7b, 0xd5, 0xec, 0xee, 0x0a, 0x48, 0xb1, 0xfa, 0x27, 0x05, 0x96, 0x13, 0x67, 0x77, 0x0d, + 0x8c, 0xf1, 0x61, 0x9a, 0x31, 0xbe, 0x3a, 0x4d, 0xd2, 0x94, 0x50, 0xc6, 0xbf, 0xd6, 0x52, 0xc1, + 0x7f, 0xeb, 0x49, 0xcc, 0xaf, 0x61, 0x75, 0xe4, 0x98, 0x43, 0x8b, 0xee, 0x99, 0xc4, 0xb0, 0x42, + 0x05, 0x3e, 0x31, 0xce, 0x66, 0xff, 0x18, 0x8a, 0xe0, 0xa9, 0xe7, 0x1b, 0x3e, 0xa3, 0x36, 0xfb, + 0x2c, 0xb6, 0xec, 0x7c, 0x57, 0x3a, 0x59, 0xfd, 0xac, 0x00, 0x0e, 0x17, 0x3a, 0x41, 0x3f, 0x80, + 0x45, 0x3e, 0x30, 0x1b, 0x3a, 0xe5, 0xdc, 0x5b, 0x26, 0xd6, 0x8a, 0x04, 0x5a, 0xec, 0xc6, 0x22, + 0x9c, 0xd4, 0x43, 0x8f, 0x61, 0xc5, 0x75, 0xfa, 0x07, 0xc4, 0x26, 0x03, 0xca, 0xc7, 0x8c, 0x23, + 0xf1, 0x07, 0xb2, 0x60, 0x36, 0x0b, 0x9d, 0xf7, 0xc2, 0xc9, 0xf4, 0x28, 0xaf, 0xf2, 0x82, 0x53, + 0x84, 0xfc, 0xb2, 0x28, 0xea, 0x22, 0x48, 0xe4, 0x41, 0x63, 0x28, 0x9f, 0x7b, 0x49, 0xf4, 0x82, + 0xff, 0x5b, 0x76, 0xa6, 0xc9, 0xb0, 0x93, 0x94, 0x65, 0xdc, 0xfd, 0xd3, 0xeb, 0x38, 0xe3, 0xa1, + 0x94, 0xb8, 0xd5, 0xff, 0x1f, 0xe2, 0xa6, 0xfe, 0xbb, 0x0a, 0x37, 0x73, 0xad, 0x12, 0xfd, 0xf8, + 0x1c, 0x86, 0x73, 0xeb, 0xca, 0xd8, 0x4d, 0x6e, 0x40, 0x9f, 0xbd, 0xc4, 0x80, 0xbe, 0x0b, 0xcb, + 0xfa, 0xd0, 0xf3, 0xa8, 0xcd, 0x32, 0xac, 0x26, 0xa2, 0x46, 0x7b, 0x69, 0x31, 0xce, 0xea, 0x17, + 0xb1, 0xab, 0xda, 0x25, 0xd9, 0x55, 0x32, 0x0a, 0x39, 0x21, 0x07, 0x69, 0x97, 0x8f, 0x42, 0x0e, + 0xca, 0x59, 0x7d, 0x3e, 0x1d, 0x04, 0xa8, 0x11, 0xc2, 0x7c, 0x7a, 0x3a, 0x38, 0x49, 0x49, 0x71, + 0x46, 0xbb, 0x80, 0xa9, 0x2c, 0x4c, 0xcb, 0x54, 0x10, 0x49, 0x91, 0x30, 0x10, 0x35, 0xbe, 0x3d, + 0x4d, 0x2e, 0x4f, 0xcd, 0xc2, 0xd4, 0xbf, 0x29, 0xf0, 0x72, 0x69, 0x11, 0xa0, 0xdd, 0xd4, 0x93, + 0xbb, 0x9d, 0x79, 0x72, 0xbf, 0x57, 0x6a, 0x98, 0x78, 0x77, 0xbd, 0x62, 0x6a, 0xf4, 0xc1, 0x74, + 0xd4, 0xa8, 0x60, 0x6e, 0xbf, 0x98, 0x23, 0x75, 0xb6, 0x9f, 0x3e, 0x6b, 
0xcd, 0x7c, 0xfd, 0xac, + 0x35, 0xf3, 0xcd, 0xb3, 0xd6, 0xcc, 0x6f, 0x26, 0x2d, 0xe5, 0xe9, 0xa4, 0xa5, 0x7c, 0x3d, 0x69, + 0x29, 0xdf, 0x4c, 0x5a, 0xca, 0x3f, 0x26, 0x2d, 0xe5, 0x77, 0xcf, 0x5b, 0x33, 0x9f, 0xcf, 0x4b, + 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x9d, 0x5d, 0x9e, 0x04, 0x8c, 0x1b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 4fec5aad..68397a02 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -338,6 +338,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -442,6 +463,12 @@ message StatefulSetStatus { // newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -454,3 +481,4 @@ message StatefulSetUpdateStrategy { // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; } + diff --git a/vendor/k8s.io/api/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go index 84789be6..5b16819f 100644 --- a/vendor/k8s.io/api/apps/v1beta1/register.go +++ b/vendor/k8s.io/api/apps/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index 63d1c725..dd9e97e1 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -26,6 +26,7 @@ import ( const ( ControllerRevisionHashLabelKey = "controller-revision-hash" StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -247,6 +248,31 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 72d5e69c..d12baf39 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -206,6 +206,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -240,6 +253,7 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. 
The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index 2f046b02..cad744ce 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -23,105 +23,10 @@ package v1beta1 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, 
InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -591,6 +496,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -698,6 +620,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index dafca120..e93e164e 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 07d95d8a..2572ab08 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -28,6 +28,7 @@ limitations under the License. ControllerRevision ControllerRevisionList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -50,6 +51,7 @@ limitations under the License. ScaleSpec ScaleStatus StatefulSet + StatefulSetCondition StatefulSetList StatefulSetSpec StatefulSetStatus @@ -97,120 +99,129 @@ func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{8} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{19} + return fileDescriptorGenerated, []int{20} } func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } 
func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} + return fileDescriptorGenerated, []int{21} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *StatefulSet) Reset() { *m = StatefulSet{} } func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{28} + return fileDescriptorGenerated, []int{30} } func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus") @@ -233,6 +244,7 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), 
"k8s.io.api.apps.v1beta2.ScaleStatus") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") @@ -355,6 +367,48 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -373,11 +427,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -412,28 +466,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n10, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -489,6 +543,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -515,11 +581,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = 
encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n11 + i += n12 } return i, nil } @@ -542,27 +608,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n13 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 return i, nil } @@ -600,19 +666,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 return i, nil } @@ -634,11 +700,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -678,28 +744,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -800,11 +866,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n21, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil 
{ return 0, err } - i += n21 + i += n22 } return i, nil } @@ -827,27 +893,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n22, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n23, err := m.Spec.MarshalTo(dAtA[i:]) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n23 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n24, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 return i, nil } @@ -877,11 +943,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n25, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n25 + i += n26 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -911,11 +977,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n26, err := m.ListMeta.MarshalTo(dAtA[i:]) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n26 + i += n27 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -955,20 +1021,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n27, err := m.Selector.MarshalTo(dAtA[i:]) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n28, err := m.Template.MarshalTo(dAtA[i:]) + n29, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1039,11 +1105,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n29, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 } return i, nil } @@ -1067,21 +1133,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } if m.MaxSurge != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n31, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } return i, nil } @@ -1127,27 +1193,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n32, 
err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n33, err := m.Spec.MarshalTo(dAtA[i:]) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n33 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n34, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 return i, nil } @@ -1237,27 +1303,69 @@ func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n35, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n36, err := m.Spec.MarshalTo(dAtA[i:]) + n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n36 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n37, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n38, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) return i, nil } @@ -1279,11 +1387,11 @@ func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n38, err := m.ListMeta.MarshalTo(dAtA[i:]) + n40, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n40 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1323,20 +1431,20 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n39, err := m.Selector.MarshalTo(dAtA[i:]) + n41, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n41 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n40, err := m.Template.MarshalTo(dAtA[i:]) 
+ n42, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n42 if len(m.VolumeClaimTemplates) > 0 { for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 @@ -1360,11 +1468,11 @@ func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n41, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n41 + i += n43 if m.RevisionHistoryLimit != nil { dAtA[i] = 0x40 i++ @@ -1416,6 +1524,18 @@ func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -1442,11 +1562,11 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n42, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n42 + i += n44 } return i, nil } @@ -1515,6 +1635,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -1561,6 +1697,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1834,6 +1976,22 @@ func (m *StatefulSet) Size() (n int) { return n } +func (m *StatefulSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *StatefulSetList) Size() (n int) { var l int _ = l @@ -1893,6 +2051,12 @@ func (m *StatefulSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -1956,6 +2120,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + 
`,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -1995,6 +2173,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2245,6 +2424,20 @@ func (this *StatefulSet) String() string { }, "") return s } +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *StatefulSetList) String() string { if this == nil { return "nil" @@ -2286,6 +2479,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2689,6 +2883,202 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -3183,6 +3573,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5969,6 +6390,202 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } 
return nil } +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StatefulSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -6554,6 +7171,37 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6797,138 +7445,142 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2119 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0xda, 0xa5, 0x2c, 0xc9, 0xa6, 0x54, 0x49, 0x91, 0xdb, 0x95, 0xb0, 0x09, - 0x1c, 0x39, 0xb6, 0x67, 0x6d, 0xe5, 0x81, 0xc4, 0x06, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xc9, 0x40, 0x83, 0x16, 0x30, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0xe9, 0xb9, 0xff, 0x44, 0x7b, 0x2a, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x8e, 0x3d, 0xf7, 0x12, 0xa0, 0x40, 0x41, - 0x0e, 0xe7, 0xc1, 0x79, 0x58, 0x23, 0x25, 0xde, 0x14, 0xb9, 0x69, 0xf9, 0xfd, 0xbe, 0x1f, 0x3f, - 0x0e, 0x3f, 0x7e, 0xdf, 0x6f, 0x38, 0x02, 0x3f, 0x3e, 0x79, 0xd7, 0x55, 0x35, 0xab, 0x75, 0xe2, - 0x1d, 0x11, 0xc7, 0x24, 0x94, 0xb8, 0xad, 0x3e, 0x31, 0xbb, 0x96, 0xd3, 0x12, 0x06, 0x6c, 0x6b, - 0x2d, 0x6c, 0xdb, 0x6e, 0xab, 0x7f, 0xe7, 0x88, 0x50, 0xbc, 0xd6, 0xea, 0x11, 0x93, 0x38, 0x98, - 0x92, 0xae, 0x6a, 0x3b, 0x16, 0xb5, 0xe0, 0x82, 0x0f, 0x54, 0xb1, 0xad, 0xa9, 0x0c, 0xa8, 0x0a, - 0xe0, 0xd2, 0xad, 0x9e, 0x46, 0x8f, 0xbd, 0x23, 0xb5, 0x63, 0x19, 0xad, 0x9e, 0xd5, 0xb3, 0x5a, - 0x1c, 0x7f, 0xe4, 0x3d, 0xe1, 0xbf, 0xf8, 0x0f, 0xfe, 0x97, 0xcf, 0xb3, 0xd4, 0x8c, 0x4d, 0xd8, - 0xb1, 0x1c, 0xd2, 0xea, 0xdf, 0x49, 0xce, 0xb5, 0x74, 0x3d, 0x86, 0xb1, 0x2d, 0x5d, 0xeb, 0x0c, - 0x44, 0x58, 0x69, 0xe8, 0x5b, 0x11, 0xd4, 0xc0, 0x9d, 0x63, 0xcd, 0x24, 0xce, 0xa0, 0x65, 0x9f, - 0xf4, 0xd8, 0x80, 0xdb, 0x32, 0x08, 0xc5, 0x59, 0x13, 0xb4, 0xf2, 0xbc, 0x1c, 0xcf, 0xa4, 0x9a, - 0x41, 0x52, 0x0e, 0xef, 0x9c, 0xe5, 0xe0, 0x76, 0x8e, 0x89, 0x81, 0x53, 0x7e, 0x6f, 0xe6, 0xf9, - 0x79, 0x54, 0xd3, 0x5b, 0x9a, 0x49, 0x5d, 0xea, 0x24, 0x9d, 0x9a, 0xff, 
0x51, 0x00, 0xdc, 0xb0, - 0x4c, 0xea, 0x58, 0xba, 0x4e, 0x1c, 0x44, 0xfa, 0x9a, 0xab, 0x59, 0x26, 0x7c, 0x0c, 0x6a, 0x6c, - 0x3d, 0x5d, 0x4c, 0xf1, 0xa2, 0xb2, 0xa2, 0xac, 0x4e, 0xae, 0xdd, 0x56, 0xa3, 0x4d, 0x09, 0xe9, - 0x55, 0xfb, 0xa4, 0xc7, 0x06, 0x5c, 0x95, 0xa1, 0xd5, 0xfe, 0x1d, 0x75, 0xf7, 0xe8, 0x63, 0xd2, - 0xa1, 0xdb, 0x84, 0xe2, 0x36, 0x7c, 0x76, 0xba, 0x3c, 0x36, 0x3c, 0x5d, 0x06, 0xd1, 0x18, 0x0a, - 0x59, 0xe1, 0x2e, 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x95, 0xcb, 0x2e, 0x16, 0xad, 0x22, 0xfc, - 0x8b, 0xf7, 0x9f, 0x52, 0x62, 0xb2, 0xf0, 0xda, 0x97, 0x04, 0x75, 0x65, 0x13, 0x53, 0x8c, 0x38, - 0x11, 0xbc, 0x09, 0x6a, 0x8e, 0x08, 0x7f, 0xb1, 0xbc, 0xa2, 0xac, 0x96, 0xdb, 0x97, 0x05, 0xaa, - 0x16, 0x2c, 0x0b, 0x85, 0x88, 0xe6, 0x33, 0x05, 0xcc, 0xa7, 0xd7, 0xbd, 0xa5, 0xb9, 0x14, 0xfe, - 0x2c, 0xb5, 0x76, 0xb5, 0xd8, 0xda, 0x99, 0x37, 0x5f, 0x79, 0x38, 0x71, 0x30, 0x12, 0x5b, 0xf7, - 0x1e, 0xa8, 0x6a, 0x94, 0x18, 0xee, 0x62, 0x69, 0xa5, 0xbc, 0x3a, 0xb9, 0x76, 0x43, 0xcd, 0xc9, - 0x75, 0x35, 0x1d, 0x5d, 0x7b, 0x4a, 0xf0, 0x56, 0x1f, 0x32, 0x06, 0xe4, 0x13, 0x35, 0x7f, 0x53, - 0x02, 0xf5, 0x4d, 0x4c, 0x0c, 0xcb, 0xdc, 0x27, 0x74, 0x04, 0x3b, 0xf7, 0x00, 0x54, 0x5c, 0x9b, - 0x74, 0xc4, 0xce, 0x5d, 0xcb, 0x5d, 0x40, 0x18, 0xd3, 0xbe, 0x4d, 0x3a, 0xd1, 0x96, 0xb1, 0x5f, - 0x88, 0x33, 0xc0, 0x3d, 0x30, 0xee, 0x52, 0x4c, 0x3d, 0x97, 0x6f, 0xd8, 0xe4, 0xda, 0x6a, 0x01, - 0x2e, 0x8e, 0x6f, 0x4f, 0x0b, 0xb6, 0x71, 0xff, 0x37, 0x12, 0x3c, 0xcd, 0x3f, 0x29, 0x60, 0x2a, - 0xc4, 0x8e, 0x60, 0x37, 0xef, 0xcb, 0xbb, 0xd9, 0x3c, 0x7b, 0x01, 0x39, 0x9b, 0xf8, 0x69, 0x39, - 0x16, 0x38, 0x7b, 0x44, 0xf0, 0xe7, 0xa0, 0xe6, 0x12, 0x9d, 0x74, 0xa8, 0xe5, 0x88, 0xc0, 0xdf, - 0x2c, 0x18, 0x38, 0x3e, 0x22, 0xfa, 0xbe, 0x70, 0x6d, 0x5f, 0x62, 0x91, 0x07, 0xbf, 0x50, 0x48, - 0x09, 0x3f, 0x04, 0x35, 0x4a, 0x0c, 0x5b, 0xc7, 0x94, 0x88, 0x9d, 0x7c, 0x35, 0x1e, 0x3c, 0x2b, - 0x97, 0x8c, 0x6c, 0xcf, 0xea, 0x1e, 0x08, 0x18, 0xdf, 0xc6, 0xf0, 0x61, 0x04, 0xa3, 0x28, 0xa4, - 0x81, 0x36, 0x98, 0xf6, 0xec, 0x2e, 0x43, 0x52, 0x56, 0x62, 0x7a, 0x03, 0xb1, 0xad, 0xb7, 0xcf, - 0x7e, 0x2a, 0x87, 0x92, 0x5f, 0x7b, 0x5e, 0xcc, 0x32, 0x2d, 0x8f, 0xa3, 0x04, 0x3f, 0x5c, 0x07, - 0x33, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, 0xd2, 0xb1, 0xcc, 0xae, 0xbb, 0x58, 0x59, 0x51, - 0x56, 0xab, 0xed, 0x05, 0x41, 0x30, 0xb3, 0x2d, 0x9b, 0x51, 0x12, 0x0f, 0xb7, 0xc0, 0x5c, 0x50, - 0x14, 0x1e, 0x68, 0x2e, 0xb5, 0x9c, 0xc1, 0x96, 0x66, 0x68, 0x74, 0x71, 0x9c, 0xf3, 0x2c, 0x0e, - 0x4f, 0x97, 0xe7, 0x50, 0x86, 0x1d, 0x65, 0x7a, 0x35, 0xff, 0x58, 0x05, 0x33, 0x89, 0x5c, 0x85, - 0x8f, 0xc0, 0x7c, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, 0x38, 0x22, 0xce, 0x7e, 0xe7, 0x98, - 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0x11, 0xeb, 0xfc, 0x46, 0x26, 0x0a, 0xe5, - 0x78, 0xc3, 0x0f, 0x00, 0x34, 0xf9, 0xd0, 0xb6, 0xe6, 0xba, 0x21, 0x67, 0x89, 0x73, 0x2e, 0x09, - 0x4e, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2f, 0x16, 0x63, 0x97, 0xb8, 0x9a, 0x43, 0xba, 0xc9, 0x18, - 0xcb, 0x72, 0x8c, 0x9b, 0x99, 0x28, 0x94, 0xe3, 0x0d, 0xdf, 0x06, 0x93, 0xfe, 0x6c, 0xfc, 0x99, - 0x8b, 0xcd, 0x99, 0x15, 0x64, 0x93, 0x3b, 0x91, 0x09, 0xc5, 0x71, 0x6c, 0x69, 0xd6, 0x91, 0x4b, - 0x9c, 0x3e, 0xe9, 0xde, 0xf7, 0x1b, 0x16, 0xab, 0xea, 0x55, 0x5e, 0xd5, 0xc3, 0xa5, 0xed, 0xa6, - 0x10, 0x28, 0xc3, 0x8b, 0x2d, 0xcd, 0xcf, 0x9a, 0xd4, 0xd2, 0xc6, 0xe5, 0xa5, 0x1d, 0x66, 0xa2, - 0x50, 0x8e, 0x37, 0xcb, 0x3d, 0x3f, 0xe4, 0xf5, 0x3e, 0xd6, 0x74, 0x7c, 0xa4, 0x93, 0xc5, 0x09, - 0x39, 0xf7, 0x76, 0x64, 0x33, 0x4a, 0xe2, 0xe1, 0x7d, 0x70, 0xc5, 0x1f, 0x3a, 0x34, 0x71, 0x48, - 
0x52, 0xe3, 0x24, 0xaf, 0x08, 0x92, 0x2b, 0x3b, 0x49, 0x00, 0x4a, 0xfb, 0xc0, 0xbb, 0x60, 0xba, - 0x63, 0xe9, 0x3a, 0xcf, 0xc7, 0x0d, 0xcb, 0x33, 0xe9, 0x62, 0x9d, 0xb3, 0x40, 0x76, 0x86, 0x36, - 0x24, 0x0b, 0x4a, 0x20, 0x9b, 0x9f, 0x2a, 0x60, 0x21, 0xe7, 0x1c, 0xc2, 0x1f, 0x81, 0x0a, 0x1d, - 0xd8, 0x84, 0x27, 0x6a, 0xbd, 0x7d, 0x23, 0x28, 0xe1, 0x07, 0x03, 0x9b, 0x7c, 0x75, 0xba, 0x7c, - 0x35, 0xc7, 0x8d, 0x99, 0x11, 0x77, 0x84, 0xc7, 0x60, 0x8a, 0xf5, 0x30, 0xcd, 0xec, 0xf9, 0x10, - 0x51, 0x6a, 0x5a, 0xb9, 0x15, 0x01, 0xc5, 0xd1, 0x51, 0xd1, 0xbc, 0x32, 0x3c, 0x5d, 0x9e, 0x92, - 0x6c, 0x48, 0x26, 0x6e, 0xfe, 0xb6, 0x04, 0xc0, 0x26, 0xb1, 0x75, 0x6b, 0x60, 0x10, 0x73, 0x14, - 0x6d, 0xf0, 0xa1, 0xd4, 0x06, 0x5f, 0xcf, 0xaf, 0x71, 0x61, 0x50, 0xb9, 0x7d, 0xf0, 0xc3, 0x44, - 0x1f, 0xbc, 0x5e, 0x84, 0xec, 0xc5, 0x8d, 0xf0, 0xf3, 0x32, 0x98, 0x8d, 0xc0, 0x1b, 0x96, 0xd9, - 0xd5, 0xf8, 0x69, 0xb8, 0x27, 0xed, 0xe8, 0xeb, 0x89, 0x1d, 0x5d, 0xc8, 0x70, 0x89, 0xed, 0xe6, - 0x56, 0x18, 0x67, 0x89, 0xbb, 0xbf, 0x25, 0x4f, 0xfe, 0xd5, 0xe9, 0x72, 0x86, 0xe2, 0x56, 0x43, - 0x26, 0x39, 0x44, 0x78, 0x0d, 0x8c, 0x3b, 0x04, 0xbb, 0x96, 0xc9, 0xcb, 0x42, 0x3d, 0x5a, 0x0a, - 0xe2, 0xa3, 0x48, 0x58, 0xe1, 0x75, 0x30, 0x61, 0x10, 0xd7, 0xc5, 0x3d, 0xc2, 0x2b, 0x40, 0xbd, - 0x3d, 0x23, 0x80, 0x13, 0xdb, 0xfe, 0x30, 0x0a, 0xec, 0xf0, 0x63, 0x30, 0xad, 0x63, 0x57, 0xa4, - 0xe3, 0x81, 0x66, 0x10, 0x7e, 0xc6, 0x27, 0xd7, 0xde, 0x28, 0xb6, 0xf7, 0xcc, 0x23, 0xea, 0x3d, - 0x5b, 0x12, 0x13, 0x4a, 0x30, 0xc3, 0x3e, 0x80, 0x6c, 0xe4, 0xc0, 0xc1, 0xa6, 0xeb, 0x3f, 0x28, - 0x36, 0xdf, 0xc4, 0xb9, 0xe7, 0x0b, 0xeb, 0xd9, 0x56, 0x8a, 0x0d, 0x65, 0xcc, 0xd0, 0xfc, 0xb3, - 0x02, 0xa6, 0xa3, 0x6d, 0x1a, 0x81, 0xc6, 0x79, 0x20, 0x6b, 0x9c, 0x57, 0x0b, 0x24, 0x67, 0x8e, - 0xc8, 0xf9, 0xbc, 0x12, 0x0f, 0x9d, 0xab, 0x9c, 0x55, 0xa6, 0xda, 0x6d, 0x5d, 0xeb, 0x60, 0x57, - 0xb4, 0xc3, 0x4b, 0xbe, 0x62, 0xf7, 0xc7, 0x50, 0x68, 0x95, 0xf4, 0x50, 0xe9, 0xe5, 0xea, 0xa1, - 0xf2, 0x37, 0xa3, 0x87, 0x7e, 0x0a, 0x6a, 0x6e, 0xa0, 0x84, 0x2a, 0x9c, 0xf2, 0x46, 0xa1, 0x83, - 0x2d, 0x44, 0x50, 0x48, 0x1d, 0xca, 0x9f, 0x90, 0x2e, 0x4b, 0xf8, 0x54, 0xbf, 0x4d, 0xe1, 0xc3, - 0x0e, 0xb3, 0x8d, 0x3d, 0x97, 0x74, 0xf9, 0x09, 0xa8, 0x45, 0x87, 0x79, 0x8f, 0x8f, 0x22, 0x61, - 0x85, 0x87, 0x60, 0xc1, 0x76, 0xac, 0x9e, 0x43, 0x5c, 0x77, 0x93, 0xe0, 0xae, 0xae, 0x99, 0x24, - 0x58, 0x80, 0xdf, 0xb2, 0xae, 0x0e, 0x4f, 0x97, 0x17, 0xf6, 0xb2, 0x21, 0x28, 0xcf, 0xb7, 0xf9, - 0xb7, 0x0a, 0xb8, 0x9c, 0xac, 0x8d, 0x39, 0x2a, 0x42, 0xb9, 0x90, 0x8a, 0xb8, 0x19, 0xcb, 0x53, - 0x5f, 0x62, 0xc5, 0xde, 0x2e, 0x53, 0xb9, 0xba, 0x0e, 0x66, 0x84, 0x6a, 0x08, 0x8c, 0x42, 0x47, - 0x85, 0xdb, 0x73, 0x28, 0x9b, 0x51, 0x12, 0xcf, 0xb4, 0x41, 0xd4, 0xf2, 0x03, 0x92, 0x8a, 0xac, - 0x0d, 0xd6, 0x93, 0x00, 0x94, 0xf6, 0x81, 0xdb, 0x60, 0xd6, 0x33, 0xd3, 0x54, 0x7e, 0xba, 0x5c, - 0x15, 0x54, 0xb3, 0x87, 0x69, 0x08, 0xca, 0xf2, 0x83, 0x8f, 0x01, 0xe8, 0x04, 0x05, 0xdd, 0x5d, - 0x1c, 0xe7, 0x25, 0xe1, 0x66, 0x81, 0xb4, 0x0e, 0xbb, 0x40, 0xd4, 0x56, 0xc3, 0x21, 0x17, 0xc5, - 0x38, 0xe1, 0x3d, 0x30, 0xe5, 0x70, 0x49, 0x18, 0x84, 0xea, 0xcb, 0xaa, 0xef, 0x09, 0xb7, 0x29, - 0x14, 0x37, 0x22, 0x19, 0x9b, 0xa1, 0x84, 0x6a, 0x85, 0x95, 0xd0, 0x5f, 0x15, 0x00, 0xd3, 0xe7, - 0x10, 0xde, 0x95, 0x5a, 0xe6, 0xb5, 0x44, 0xcb, 0x9c, 0x4f, 0x7b, 0xc4, 0x3a, 0xa6, 0x96, 0xad, - 0x7f, 0x6e, 0x17, 0xd4, 0x3f, 0x51, 0x41, 0x2d, 0x26, 0x80, 0xc4, 0x63, 0x18, 0xcd, 0x3d, 0x40, - 0x51, 0x01, 0x14, 0x05, 0xf5, 0x0d, 0x08, 0xa0, 0x18, 0xd9, 0x8b, 0x05, 0xd0, 0xbf, 0x4b, 0x60, - 0x36, 0x02, 0x17, 0x16, 
0x40, 0x19, 0x2e, 0x2f, 0x4d, 0x00, 0x65, 0x2b, 0x88, 0xf2, 0xcb, 0x56, - 0x10, 0x2f, 0x41, 0x78, 0x71, 0x51, 0x12, 0x3d, 0xba, 0xff, 0x27, 0x51, 0x12, 0x45, 0x95, 0x23, - 0x4a, 0xfe, 0x50, 0x8a, 0x87, 0xfe, 0x9d, 0x17, 0x25, 0x5f, 0xff, 0xca, 0xa4, 0xf9, 0xf7, 0x32, - 0xb8, 0x9c, 0x3c, 0x87, 0x52, 0x83, 0x54, 0xce, 0x6c, 0x90, 0x7b, 0x60, 0xee, 0x89, 0xa7, 0xeb, - 0x03, 0xfe, 0x18, 0x62, 0x5d, 0xd2, 0x6f, 0xad, 0xdf, 0x17, 0x9e, 0x73, 0x3f, 0xc9, 0xc0, 0xa0, - 0x4c, 0xcf, 0x9c, 0x66, 0x5f, 0xbe, 0x50, 0xb3, 0x4f, 0x75, 0xa0, 0xca, 0x39, 0x3a, 0x50, 0x66, - 0xe3, 0xae, 0x5e, 0xa0, 0x71, 0x9f, 0xaf, 0xd3, 0x66, 0x14, 0xae, 0xb3, 0x3a, 0x6d, 0xf3, 0xd7, - 0x0a, 0x98, 0xcf, 0x7e, 0xe1, 0x86, 0x3a, 0x98, 0x36, 0xf0, 0xd3, 0xf8, 0xbd, 0xc4, 0x59, 0x4d, - 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x5f, 0x19, 0xd4, 0x87, 0x26, 0xdd, 0x75, 0xf6, 0xa9, 0xa3, 0x99, - 0x3d, 0xbf, 0xf3, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x52, 0x01, 0x0b, 0x39, 0x9d, 0x6f, - 0xb4, 0x91, 0xc0, 0x8f, 0x40, 0xcd, 0xc0, 0x4f, 0xf7, 0x3d, 0xa7, 0x97, 0xd5, 0xab, 0x8b, 0xcd, - 0xc3, 0x4f, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0xac, 0x48, 0x8b, 0x64, 0x27, 0x87, - 0x3c, 0xf1, 0x74, 0x7e, 0x88, 0x84, 0xd8, 0xb8, 0x01, 0xea, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, - 0x6a, 0x7b, 0x6a, 0x78, 0xba, 0x5c, 0xdf, 0x0b, 0x06, 0x51, 0x64, 0x6f, 0xfe, 0x57, 0x01, 0xd5, - 0xfd, 0x0e, 0xd6, 0xc9, 0x08, 0xba, 0xfd, 0xa6, 0xd4, 0xed, 0xf3, 0x2f, 0xba, 0x79, 0x3c, 0xb9, - 0x8d, 0x7e, 0x2b, 0xd1, 0xe8, 0x5f, 0x3b, 0x83, 0xe7, 0xc5, 0x3d, 0xfe, 0x3d, 0x50, 0x0f, 0xa7, - 0x3b, 0x5f, 0x01, 0x6a, 0xfe, 0xbe, 0x04, 0x26, 0x63, 0x53, 0x9c, 0xb3, 0x7c, 0x3d, 0x96, 0xca, - 0x3e, 0x3b, 0x98, 0x6b, 0x45, 0x16, 0xa2, 0x06, 0x25, 0xfe, 0x7d, 0x93, 0x3a, 0xf1, 0x17, 0xbc, - 0x74, 0xe5, 0xff, 0x21, 0x98, 0xa6, 0xd8, 0xe9, 0x11, 0x1a, 0xd8, 0xf8, 0x03, 0xab, 0x47, 0xb7, - 0x13, 0x07, 0x92, 0x15, 0x25, 0xd0, 0x4b, 0xf7, 0xc0, 0x94, 0x34, 0x19, 0xbc, 0x0c, 0xca, 0x27, - 0x64, 0xe0, 0xcb, 0x1e, 0xc4, 0xfe, 0x84, 0x73, 0xa0, 0xda, 0xc7, 0xba, 0xe7, 0xe7, 0x79, 0x1d, - 0xf9, 0x3f, 0xee, 0x96, 0xde, 0x55, 0x9a, 0x9f, 0xb0, 0x87, 0x13, 0x25, 0xe7, 0x08, 0xb2, 0xeb, - 0x03, 0x29, 0xbb, 0xf2, 0xbf, 0x03, 0xc5, 0x8f, 0x4c, 0x5e, 0x8e, 0xa1, 0x44, 0x8e, 0xbd, 0x51, - 0x88, 0xed, 0xc5, 0x99, 0xf6, 0x17, 0x05, 0xcc, 0xc4, 0xd0, 0x23, 0x10, 0x38, 0x0f, 0x65, 0x81, - 0xf3, 0x5a, 0x91, 0x45, 0xe4, 0x28, 0x9c, 0x7f, 0x54, 0xa5, 0xe0, 0xbf, 0xf3, 0x12, 0xe7, 0x97, - 0x60, 0xae, 0x6f, 0xe9, 0x9e, 0x41, 0x36, 0x74, 0xac, 0x19, 0x01, 0x80, 0x75, 0xf1, 0x72, 0xf2, - 0xdd, 0x22, 0xa4, 0x27, 0x8e, 0xab, 0xb9, 0x94, 0x98, 0xf4, 0x51, 0xe4, 0x19, 0xe9, 0x90, 0x47, - 0x19, 0x74, 0x28, 0x73, 0x12, 0xf8, 0x36, 0x98, 0x64, 0x7a, 0x42, 0xeb, 0x90, 0x1d, 0x6c, 0x04, - 0xc2, 0x39, 0xfc, 0xe2, 0xb1, 0x1f, 0x99, 0x50, 0x1c, 0x07, 0x8f, 0xc1, 0xac, 0x6d, 0x75, 0xb7, - 0xb1, 0x89, 0x7b, 0x84, 0xb5, 0xbd, 0x3d, 0xfe, 0xaf, 0x08, 0xfc, 0x32, 0xa6, 0xde, 0x7e, 0x27, - 0x78, 0x4b, 0xdf, 0x4b, 0x43, 0xd8, 0x4b, 0x4b, 0xc6, 0x30, 0x7f, 0x69, 0xc9, 0xa2, 0x84, 0x4e, - 0xea, 0x2b, 0x9d, 0x7f, 0x67, 0xb9, 0x56, 0x24, 0xc3, 0x2e, 0xf8, 0x9d, 0x2e, 0xef, 0xae, 0xa9, - 0x76, 0xa1, 0x8f, 0x6c, 0x9f, 0x54, 0xc0, 0x95, 0xd4, 0xd1, 0xfd, 0x16, 0x6f, 0x7b, 0x52, 0x72, - 0xb1, 0x7c, 0x0e, 0xb9, 0xb8, 0x0e, 0x66, 0xc4, 0xf7, 0xbd, 0x84, 0xda, 0x0c, 0xf5, 0xf8, 0x86, - 0x6c, 0x46, 0x49, 0x7c, 0xd6, 0x6d, 0x53, 0xf5, 0x9c, 0xb7, 0x4d, 0xf1, 0x28, 0xc4, 0xff, 0x50, - 0xf8, 0xa9, 0x97, 0x8e, 0x42, 0xfc, 0x2b, 0x45, 0x12, 0xcf, 0x3a, 0x96, 0xcf, 0x1a, 0x32, 0x4c, - 0xc8, 0x1d, 0xeb, 0x50, 0xb2, 0xa2, 0x04, 0xfa, 
0x6b, 0x7d, 0xc3, 0xfa, 0xa7, 0x02, 0x5e, 0xc9, - 0xcd, 0x52, 0xb8, 0x2e, 0xbd, 0xf2, 0xdf, 0x4a, 0xbc, 0xf2, 0xff, 0x20, 0xd7, 0x31, 0xf6, 0xe2, - 0xef, 0x64, 0xdf, 0xe3, 0xbc, 0x57, 0xec, 0x1e, 0x27, 0x43, 0xe8, 0x9d, 0x7d, 0xa1, 0xd3, 0xbe, - 0xf5, 0xec, 0x79, 0x63, 0xec, 0xb3, 0xe7, 0x8d, 0xb1, 0x2f, 0x9e, 0x37, 0xc6, 0x7e, 0x35, 0x6c, - 0x28, 0xcf, 0x86, 0x0d, 0xe5, 0xb3, 0x61, 0x43, 0xf9, 0x62, 0xd8, 0x50, 0xfe, 0x35, 0x6c, 0x28, - 0xbf, 0xfb, 0xb2, 0x31, 0xf6, 0xd1, 0x84, 0x98, 0xf1, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xe8, - 0x1d, 0x40, 0xdd, 0x77, 0x25, 0x00, 0x00, + // 2186 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, + 0x19, 0xf7, 0xec, 0x43, 0x5a, 0x51, 0x96, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, + 0x1c, 0x39, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, + 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, + 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, + 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, + 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, + 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xf4, 0xb6, 0xab, 0x6a, + 0x56, 0xeb, 0xc8, 0x3b, 0x20, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, + 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x03, 0x42, 0xf1, 0x6a, 0xab, 0x47, + 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x0b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, + 0x32, 0xa0, 0x2a, 0x80, 0x8b, 0x2b, 0x3d, 0x8d, 0x1e, 0x7a, 0x07, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, + 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xc0, 0x7b, 0xc4, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x36, + 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x93, 0x63, 0x2d, 0x5e, 0x8d, 0x61, 0x6c, 0x4b, + 0xd7, 0x3a, 0x03, 0xe1, 0x56, 0x1a, 0xfa, 0x46, 0x04, 0x35, 0x70, 0xe7, 0x50, 0x33, 0x89, 0x33, + 0x68, 0xd9, 0x47, 0x3d, 0xd6, 0xe0, 0xb6, 0x0c, 0x42, 0x71, 0xd6, 0x00, 0xad, 0x3c, 0x2b, 0xc7, + 0x33, 0xa9, 0x66, 0x90, 0x94, 0xc1, 0x5b, 0xa3, 0x0c, 0xdc, 0xce, 0x21, 0x31, 0x70, 0xca, 0xee, + 0xf5, 0x3c, 0x3b, 0x8f, 0x6a, 0x7a, 0x4b, 0x33, 0xa9, 0x4b, 0x9d, 0xa4, 0x51, 0xf3, 0x3f, 0x0a, + 0x80, 0xeb, 0x96, 0x49, 0x1d, 0x4b, 0xd7, 0x89, 0x83, 0x48, 0x5f, 0x73, 0x35, 0xcb, 0x84, 0x0f, + 0x41, 0x8d, 0xcd, 0xa7, 0x8b, 0x29, 0xae, 0x2b, 0x97, 0x95, 0xe5, 0xc9, 0xd5, 0x1b, 0x6a, 0xb4, + 0x29, 0x21, 0xbd, 0x6a, 0x1f, 0xf5, 0x58, 0x83, 0xab, 0x32, 0xb4, 0xda, 0xbf, 0xa9, 0xee, 0x1c, + 0x7c, 0x48, 0x3a, 0x74, 0x8b, 0x50, 0xdc, 0x86, 0x4f, 0x8e, 0x97, 0xce, 0x0d, 0x8f, 0x97, 0x40, + 0xd4, 0x86, 0x42, 0x56, 0xb8, 0x03, 0x2a, 0x9c, 0xbd, 0xc4, 0xd9, 0x57, 0x72, 0xd9, 0xc5, 0xa4, + 0x55, 0x84, 0x7f, 0xf2, 0xee, 0x63, 0x4a, 0x4c, 0xe6, 0x5e, 0xfb, 0xbc, 0xa0, 0xae, 0x6c, 0x60, + 0x8a, 0x11, 0x27, 0x82, 0xd7, 0x41, 0xcd, 0x11, 0xee, 0xd7, 0xcb, 0x97, 0x95, 0xe5, 0x72, 0xfb, + 0x82, 0x40, 0xd5, 0x82, 0x69, 0xa1, 0x10, 0xd1, 0x7c, 0xa2, 0x80, 0xf9, 0xf4, 0xbc, 0x37, 0x35, + 0x97, 0xc2, 0x1f, 0xa6, 0xe6, 0xae, 0x16, 0x9b, 0x3b, 0xb3, 0xe6, 0x33, 0x0f, 0x07, 0x0e, 0x5a, + 0x62, 0xf3, 0xde, 0x05, 0x55, 0x8d, 0x12, 0xc3, 0xad, 0x97, 0x2e, 0x97, 0x97, 0x27, 0x57, 0xaf, + 0xa9, 0x39, 0xb1, 0xae, 0xa6, 0xbd, 0x6b, 0x4f, 0x09, 0xde, 0xea, 0x3d, 0xc6, 0x80, 0x7c, 0xa2, + 0xe6, 0x2f, 0x4a, 0x60, 0x62, 0x03, 0x13, 0xc3, 0x32, 0xf7, 0x08, 0x3d, 0x83, 
0x9d, 0xbb, 0x0b, + 0x2a, 0xae, 0x4d, 0x3a, 0x62, 0xe7, 0xae, 0xe4, 0x4e, 0x20, 0xf4, 0x69, 0xcf, 0x26, 0x9d, 0x68, + 0xcb, 0xd8, 0x13, 0xe2, 0x0c, 0x70, 0x17, 0x8c, 0xb9, 0x14, 0x53, 0xcf, 0xe5, 0x1b, 0x36, 0xb9, + 0xba, 0x5c, 0x80, 0x8b, 0xe3, 0xdb, 0xd3, 0x82, 0x6d, 0xcc, 0x7f, 0x46, 0x82, 0xa7, 0xf9, 0x8f, + 0x12, 0x80, 0x21, 0x76, 0xdd, 0x32, 0xbb, 0x1a, 0x65, 0xe1, 0x7c, 0x0b, 0x54, 0xe8, 0xc0, 0x26, + 0x7c, 0x41, 0x26, 0xda, 0x57, 0x02, 0x57, 0xee, 0x0f, 0x6c, 0xf2, 0xf9, 0xf1, 0xd2, 0x7c, 0xda, + 0x82, 0xf5, 0x20, 0x6e, 0x03, 0x37, 0x43, 0x27, 0x4b, 0xdc, 0xfa, 0x0d, 0x79, 0xe8, 0xcf, 0x8f, + 0x97, 0x32, 0x8e, 0x19, 0x35, 0x64, 0x92, 0x1d, 0x84, 0x7d, 0x00, 0x75, 0xec, 0xd2, 0xfb, 0x0e, + 0x36, 0x5d, 0x7f, 0x24, 0xcd, 0x20, 0x62, 0xfa, 0xaf, 0x15, 0xdb, 0x28, 0x66, 0xd1, 0x5e, 0x14, + 0x5e, 0xc0, 0xcd, 0x14, 0x1b, 0xca, 0x18, 0x01, 0x5e, 0x01, 0x63, 0x0e, 0xc1, 0xae, 0x65, 0xd6, + 0x2b, 0x7c, 0x16, 0xe1, 0x02, 0x22, 0xde, 0x8a, 0x44, 0x2f, 0xbc, 0x0a, 0xc6, 0x0d, 0xe2, 0xba, + 0xb8, 0x47, 0xea, 0x55, 0x0e, 0x9c, 0x11, 0xc0, 0xf1, 0x2d, 0xbf, 0x19, 0x05, 0xfd, 0xcd, 0xdf, + 0x2b, 0x60, 0x2a, 0x5c, 0xb9, 0x33, 0xc8, 0x9c, 0x3b, 0x72, 0xe6, 0x34, 0x47, 0x07, 0x4b, 0x4e, + 0xc2, 0x7c, 0x54, 0x8e, 0x39, 0xce, 0xc2, 0x11, 0xfe, 0x08, 0xd4, 0x5c, 0xa2, 0x93, 0x0e, 0xb5, + 0x1c, 0xe1, 0xf8, 0xeb, 0x05, 0x1d, 0xc7, 0x07, 0x44, 0xdf, 0x13, 0xa6, 0xed, 0xf3, 0xcc, 0xf3, + 0xe0, 0x09, 0x85, 0x94, 0xf0, 0x7d, 0x50, 0xa3, 0xc4, 0xb0, 0x75, 0x4c, 0x89, 0xc8, 0x9a, 0x97, + 0xe3, 0xce, 0xb3, 0x98, 0x61, 0x64, 0xbb, 0x56, 0xf7, 0xbe, 0x80, 0xf1, 0x94, 0x09, 0x17, 0x23, + 0x68, 0x45, 0x21, 0x0d, 0xb4, 0xc1, 0xb4, 0x67, 0x77, 0x19, 0x92, 0xb2, 0xe3, 0xbc, 0x37, 0x10, + 0x31, 0x74, 0x63, 0xf4, 0xaa, 0xec, 0x4b, 0x76, 0xed, 0x79, 0x31, 0xca, 0xb4, 0xdc, 0x8e, 0x12, + 0xfc, 0x70, 0x0d, 0xcc, 0x18, 0x9a, 0x89, 0x08, 0xee, 0x0e, 0xf6, 0x48, 0xc7, 0x32, 0xbb, 0x2e, + 0x0f, 0xa5, 0x6a, 0x7b, 0x41, 0x10, 0xcc, 0x6c, 0xc9, 0xdd, 0x28, 0x89, 0x87, 0x9b, 0x60, 0x2e, + 0x38, 0x80, 0xef, 0x6a, 0x2e, 0xb5, 0x9c, 0xc1, 0xa6, 0x66, 0x68, 0xb4, 0x3e, 0xc6, 0x79, 0xea, + 0xc3, 0xe3, 0xa5, 0x39, 0x94, 0xd1, 0x8f, 0x32, 0xad, 0x9a, 0xbf, 0x19, 0x03, 0x33, 0x89, 0x73, + 0x01, 0x3e, 0x00, 0xf3, 0x1d, 0xcf, 0x71, 0x88, 0x49, 0xb7, 0x3d, 0xe3, 0x80, 0x38, 0x7b, 0x9d, + 0x43, 0xd2, 0xf5, 0x74, 0xd2, 0xe5, 0xdb, 0x5a, 0x6d, 0x37, 0x84, 0xaf, 0xf3, 0xeb, 0x99, 0x28, + 0x94, 0x63, 0x0d, 0xdf, 0x03, 0xd0, 0xe4, 0x4d, 0x5b, 0x9a, 0xeb, 0x86, 0x9c, 0x25, 0xce, 0x19, + 0xa6, 0xe2, 0x76, 0x0a, 0x81, 0x32, 0xac, 0x98, 0x8f, 0x5d, 0xe2, 0x6a, 0x0e, 0xe9, 0x26, 0x7d, + 0x2c, 0xcb, 0x3e, 0x6e, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x7c, 0x13, 0x4c, 0xfa, 0xa3, 0xf1, 0x35, + 0x17, 0x9b, 0x33, 0x2b, 0xc8, 0x26, 0xb7, 0xa3, 0x2e, 0x14, 0xc7, 0xb1, 0xa9, 0x59, 0x07, 0x2e, + 0x71, 0xfa, 0xa4, 0x7b, 0xc7, 0x17, 0x07, 0xac, 0x82, 0x56, 0x79, 0x05, 0x0d, 0xa7, 0xb6, 0x93, + 0x42, 0xa0, 0x0c, 0x2b, 0x36, 0x35, 0x3f, 0x6a, 0x52, 0x53, 0x1b, 0x93, 0xa7, 0xb6, 0x9f, 0x89, + 0x42, 0x39, 0xd6, 0x2c, 0xf6, 0x7c, 0x97, 0xd7, 0xfa, 0x58, 0xd3, 0xf1, 0x81, 0x4e, 0xea, 0xe3, + 0x72, 0xec, 0x6d, 0xcb, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x45, 0xbf, 0x69, 0xdf, 0xc4, 0x21, + 0x49, 0x8d, 0x93, 0xbc, 0x24, 0x48, 0x2e, 0x6e, 0x27, 0x01, 0x28, 0x6d, 0x03, 0x6f, 0x81, 0xe9, + 0x8e, 0xa5, 0xeb, 0x3c, 0x1e, 0xd7, 0x2d, 0xcf, 0xa4, 0xf5, 0x09, 0xce, 0x02, 0x59, 0x0e, 0xad, + 0x4b, 0x3d, 0x28, 0x81, 0x84, 0x3f, 0x06, 0xa0, 0x13, 0x14, 0x06, 0xb7, 0x0e, 0x46, 0x28, 0x80, + 0x74, 0x59, 0x8a, 0x2a, 0x73, 0xd8, 0xe4, 0xa2, 0x18, 0x65, 0xf3, 0x23, 0x05, 0x2c, 0xe4, 0x24, + 0x3a, 
0xfc, 0xae, 0x54, 0x04, 0xaf, 0x25, 0x8a, 0xe0, 0xa5, 0x1c, 0xb3, 0x58, 0x25, 0x3c, 0x04, + 0x53, 0x4c, 0x90, 0x68, 0x66, 0xcf, 0x87, 0x88, 0xb3, 0xac, 0x95, 0x3b, 0x01, 0x14, 0x47, 0x47, + 0xa7, 0xf2, 0xc5, 0xe1, 0xf1, 0xd2, 0x94, 0xd4, 0x87, 0x64, 0xe2, 0xe6, 0x2f, 0x4b, 0x00, 0x6c, + 0x10, 0x5b, 0xb7, 0x06, 0x06, 0x31, 0xcf, 0x42, 0xd3, 0xdc, 0x93, 0x34, 0xcd, 0xab, 0xf9, 0x5b, + 0x12, 0x3a, 0x95, 0x2b, 0x6a, 0xde, 0x4f, 0x88, 0x9a, 0xab, 0x45, 0xc8, 0x9e, 0xad, 0x6a, 0x3e, + 0x29, 0x83, 0xd9, 0x08, 0x1c, 0xc9, 0x9a, 0xdb, 0xd2, 0x8e, 0xbe, 0x9a, 0xd8, 0xd1, 0x85, 0x0c, + 0x93, 0x17, 0xa6, 0x6b, 0x9e, 0xbf, 0xbe, 0x80, 0x1f, 0x82, 0x69, 0x26, 0x64, 0xfc, 0x90, 0xe0, + 0x32, 0x69, 0xec, 0xc4, 0x32, 0x29, 0x2c, 0x6e, 0x9b, 0x12, 0x13, 0x4a, 0x30, 0xe7, 0xc8, 0xb2, + 0xf1, 0x17, 0x2d, 0xcb, 0x9a, 0x7f, 0x50, 0xc0, 0x74, 0xb4, 0x4d, 0x67, 0x20, 0xa2, 0xee, 0xca, + 0x22, 0xea, 0xe5, 0x02, 0xc1, 0x99, 0xa3, 0xa2, 0x3e, 0xa9, 0xc4, 0x5d, 0xe7, 0x32, 0x6a, 0x99, + 0xbd, 0x82, 0xd9, 0xba, 0xd6, 0xc1, 0xae, 0xa8, 0xb7, 0xe7, 0xfd, 0xd7, 0x2f, 0xbf, 0x0d, 0x85, + 0xbd, 0x92, 0xe0, 0x2a, 0xbd, 0x58, 0xc1, 0x55, 0x7e, 0x3e, 0x82, 0xeb, 0x07, 0xa0, 0xe6, 0x06, + 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x15, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, 0xea, 0x50, 0x5f, 0x85, 0x74, + 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, 0x5c, 0xd2, 0xe5, 0x19, 0x50, + 0x8b, 0x92, 0x79, 0x97, 0xb7, 0x22, 0xd1, 0x0b, 0xf7, 0xc1, 0x82, 0xed, 0x58, 0x3d, 0x87, 0xb8, + 0xee, 0x06, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, 0x5e, 0x1a, 0x1e, 0x2f, 0x2d, + 0xec, 0x66, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, 0x79, 0x36, 0xe6, 0xc8, 0x14, + 0xe5, 0x54, 0x32, 0xe5, 0x7a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, 0x2a, 0x48, 0xc5, 0xea, 0x1a, + 0x98, 0x11, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x7d, 0xb9, 0x1b, 0x25, 0xf1, 0x4c, + 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x2d, 0x09, 0x40, 0x69, 0x1b, 0xb8, 0x05, + 0x66, 0x3d, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0xbb, 0x9f, 0x86, 0xa0, 0x2c, 0x3b, + 0xf8, 0x50, 0xd2, 0x23, 0x63, 0xfc, 0x48, 0xb8, 0x5e, 0x20, 0xac, 0x0b, 0x0b, 0x12, 0x78, 0x1b, + 0x4c, 0x39, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, 0x6c, 0x0a, 0xc5, 0x3b, 0x91, + 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, 0xc0, 0x74, 0x1e, 0x8e, 0xbc, + 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, 0x5a, + 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9b, 0x4b, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, 0xa0, + 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x36, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, 0x5f, + 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, 0x27, 0x51, 0x12, 0x79, 0x95, + 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, 0xfc, 0x4e, 0xa6, 0xf9, 0x97, + 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, 0x2e, 0x98, 0x7b, 0xe4, 0xe9, + 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, 0x72, 0xee, 0xfb, 0x19, 0x18, + 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0xa7, 0x2a, 0xf6, 0xa9, 0x0a, 0x54, 0x39, 0x41, 0x05, 0xca, + 0x2c, 0xdc, 0xd5, 0x53, 0x14, 0xee, 0x93, 0x55, 0xda, 0x8c, 0x83, 0x6b, 0xe4, 0xab, 0xff, 0xcf, + 0x15, 0x30, 0x9f, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x6d, 0xe0, 0xc7, 0xf1, 0x8b, 0x8f, 0x51, 0x45, + 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0x7b, 0x26, 0xdd, 0x71, 0xf6, 0xa8, 0xa3, 0x99, + 0x3d, 0xbf, 0xf2, 0x6e, 0x49, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, 0x01, 0x0b, 0x39, 0x95, 0xef, + 0x6c, 0x3d, 0x81, 0x1f, 0x80, 
0x9a, 0x81, 0x1f, 0xef, 0x79, 0x4e, 0x2f, 0xab, 0x56, 0x17, 0x1b, + 0x87, 0x67, 0xf3, 0x96, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x07, 0x5c, 0x96, 0x26, 0xc9, 0x32, 0x87, + 0x3c, 0xf2, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x13, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, + 0x6a, 0x7b, 0x6a, 0x78, 0xbc, 0x34, 0xb1, 0x1b, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x5f, 0x05, 0x54, + 0xf7, 0x3a, 0x58, 0x27, 0x67, 0x50, 0xed, 0x37, 0xa4, 0x6a, 0x9f, 0x7f, 0x93, 0xce, 0xfd, 0xc9, + 0x2d, 0xf4, 0x9b, 0x89, 0x42, 0xff, 0xca, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0x77, 0xc0, 0x44, 0x38, + 0xdc, 0xc9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x32, 0x36, 0xc4, 0x09, 0x8f, 0xaf, 0x87, 0xd2, + 0xb1, 0xcf, 0x12, 0x73, 0xb5, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, 0xd7, 0xa4, 0x4e, 0xfc, 0x05, + 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0x4c, 0x53, 0xec, 0xf4, 0x08, 0x0d, 0xfa, 0xf8, 0x82, 0x4d, 0x44, + 0xb7, 0x13, 0xf7, 0xa5, 0x5e, 0x94, 0x40, 0x2f, 0xde, 0x06, 0x53, 0xd2, 0x60, 0xf0, 0x02, 0x28, + 0x1f, 0x91, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x40, 0xb5, 0x8f, 0x75, 0xcf, 0x8f, 0xf3, + 0x09, 0xe4, 0x3f, 0xdc, 0x2a, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, 0x27, 0x0a, 0xce, 0x33, 0x88, + 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, 0x18, 0x43, 0x89, 0x18, 0x7b, + 0xad, 0x10, 0xdb, 0xb3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x17, 0x43, 0x47, 0x72, 0xf2, 0xdb, 0x92, + 0x9c, 0x5c, 0x4e, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, 0xb4, 0x9e, 0xfc, 0xa3, 0x02, + 0x66, 0x62, 0x6b, 0x77, 0x06, 0x82, 0xf2, 0x9e, 0x2c, 0x28, 0x5f, 0x29, 0x12, 0x34, 0x39, 0x8a, + 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, 0xeb, 0x5b, 0xba, 0x67, 0x90, + 0x75, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, 0x0b, 0xe9, 0x89, 0xe3, 0x6a, + 0x2e, 0x25, 0x26, 0x7d, 0x10, 0x59, 0x46, 0xba, 0xef, 0x41, 0x06, 0x1d, 0xca, 0x1c, 0x04, 0xbe, + 0x09, 0x26, 0x99, 0x7e, 0xd3, 0x3a, 0x64, 0x1b, 0x1b, 0x41, 0x60, 0x85, 0x9f, 0xb0, 0xf6, 0xa2, + 0x2e, 0x14, 0xc7, 0xc1, 0x43, 0x30, 0x6b, 0x5b, 0xdd, 0x2d, 0x6c, 0xe2, 0x1e, 0x61, 0x32, 0x63, + 0x97, 0xff, 0x8f, 0x87, 0x5f, 0x7e, 0x4d, 0xb4, 0xdf, 0x0a, 0x6e, 0x45, 0x76, 0xd3, 0x10, 0xf6, + 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x6a, + 0x91, 0x08, 0x3b, 0xe5, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, 0xa9, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, + 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, 0xcb, 0x27, 0x90, 0xe7, 0x6b, 0x60, + 0x46, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x5d, 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, + 0x5e, 0xf5, 0x84, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, + 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0xe3, 0xb2, 0x42, 0xd8, 0x97, 0x7a, 0x51, + 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0x2b, 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, + 0xdf, 0x14, 0xf0, 0x52, 0x6e, 0x22, 0xc0, 0x35, 0xa9, 0xec, 0xae, 0x24, 0xca, 0xee, 0xb7, 0x72, + 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, + 0x47, 0xd7, 0x5e, 0x79, 0xf2, 0xb4, 0x71, 0xee, 0xe3, 0xa7, 0x8d, 0x73, 0x9f, 0x3e, 0x6d, 0x9c, + 0xfb, 0xd9, 0xb0, 0xa1, 0x3c, 0x19, 0x36, 0x94, 0x8f, 0x87, 0x0d, 0xe5, 0xd3, 0x61, 0x43, 0xf9, + 0xfb, 0xb0, 0xa1, 0xfc, 0xfa, 0xb3, 0xc6, 0xb9, 0x0f, 0xc6, 0xc5, 0x88, 0xff, 0x0b, 0x00, 0x00, + 0xff, 0xff, 0xe4, 0x8f, 0x6a, 0x57, 0x17, 0x29, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index dcfd9c46..d8fac403 100644 --- 
a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -31,6 +31,8 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta2"; +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain // their internal state. @@ -63,6 +65,8 @@ message ControllerRevisionList { repeated ControllerRevision items = 2; } +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. @@ -84,6 +88,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -99,9 +124,8 @@ message DaemonSetList { message DaemonSetSpec { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; // An object that describes the pod that will be created. @@ -175,6 +199,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } // DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. @@ -192,6 +222,8 @@ message DaemonSetUpdateStrategy { optional RollingUpdateDaemonSet rollingUpdate = 2; } +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. message Deployment { // Standard object metadata. @@ -247,7 +279,7 @@ message DeploymentSpec { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - // +optional + // It must match the pod template's labels. optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template describes the pods that will be created. 
@@ -336,6 +368,8 @@ message DeploymentStrategy { optional RollingUpdateDeployment rollingUpdate = 2; } +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to @@ -407,10 +441,9 @@ message ReplicaSetSpec { optional int32 minReadySeconds = 4; // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // Template is the object that describes the pod that will be created if @@ -549,6 +582,8 @@ message ScaleStatus { optional string targetSelector = 3; } +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for +// more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -569,6 +604,27 @@ message StatefulSet { optional StatefulSetStatus status = 3; } +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // StatefulSetList is a collection of StatefulSets. message StatefulSetList { // +optional @@ -588,9 +644,8 @@ message StatefulSetSpec { optional int32 replicas = 1; // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // template is the object that describes the pod that will be created if @@ -673,6 +728,12 @@ message StatefulSetStatus { // newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; } // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -688,3 +749,4 @@ message StatefulSetUpdateStrategy { // +optional optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; } + diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go index e207c7dc..2784ee37 100644 --- a/vendor/k8s.io/api/apps/v1beta2/register.go +++ b/vendor/k8s.io/api/apps/v1beta2/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index fa5676e1..c8be2aac 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -28,6 +28,7 @@ const ( StatefulSetRevisionLabel = ControllerRevisionHashLabelKey DeprecatedRollbackTo = "deprecated.deployment.rollback.to" DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" ) // ScaleSpec describes the attributes of a scale subresource @@ -81,6 +82,8 @@ type Scale struct { // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for +// more information. // StatefulSet represents a set of pods with consistent identities. // Identities are defined as: // - Network: A single stable DNS and hostname. @@ -169,10 +172,9 @@ type StatefulSetSpec struct { Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // template is the object that describes the pod that will be created if // insufficient replicas are detected. Each pod stamped out by the StatefulSet @@ -254,6 +256,31 @@ type StatefulSetStatus struct { // newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. 
+ Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -269,6 +296,8 @@ type StatefulSetList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. // Deployment enables declarative updates for Pods and ReplicaSets. type Deployment struct { metav1.TypeMeta `json:",inline"` @@ -294,8 +323,8 @@ type DeploymentSpec struct { // Label selector for pods. Existing ReplicaSets whose pods are // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template describes the pods that will be created. Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` @@ -525,10 +554,9 @@ type RollingUpdateDaemonSet struct { type DaemonSetSpec struct { // A label query over pods that are managed by the daemon set. // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` // An object that describes the pod that will be created. // The DaemonSet will create exactly one copy of this pod on every node @@ -601,11 +629,40 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. 
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. // DaemonSet represents the configuration of a daemon set. type DaemonSet struct { metav1.TypeMeta `json:",inline"` @@ -652,6 +709,8 @@ type DaemonSetList struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. // ReplicaSet ensures that a specified number of pod replicas are running at any given time. type ReplicaSet struct { metav1.TypeMeta `json:",inline"` @@ -707,11 +766,10 @@ type ReplicaSetSpec struct { MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` // Template is the object that describes the pod that will be created if // insufficient replicas are detected. @@ -779,6 +837,8 @@ type ReplicaSetCondition struct { // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. // ControllerRevision implements an immutable snapshot of state data. Clients // are responsible for serializing and deserializing the objects that contain // their internal state. diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 35ae28ca..e2f133b5 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta2 // AUTO-GENERATED FUNCTIONS START HERE var map_ControllerRevision = map[string]string{ - "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. 
However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", @@ -49,7 +49,7 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { } var map_DaemonSet = map[string]string{ - "": "DaemonSet represents the configuration of a daemon set.", + "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", @@ -59,6 +59,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -71,7 +84,7 @@ func (DaemonSetList) SwaggerDoc() map[string]string { var map_DaemonSetSpec = map[string]string{ "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", @@ -93,6 +106,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -110,7 +124,7 @@ func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { } var map_Deployment = map[string]string{ - "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", "metadata": "Standard object metadata.", "spec": "Specification of the desired behavior of the Deployment.", "status": "Most recently observed status of the Deployment.", @@ -147,7 +161,7 @@ func (DeploymentList) SwaggerDoc() map[string]string { var map_DeploymentSpec = map[string]string{ "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", @@ -187,7 +201,7 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { } var map_ReplicaSet = map[string]string{ - "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", @@ -224,7 +238,7 @@ var map_ReplicaSetSpec = map[string]string{ "": "ReplicaSetSpec is the specification of a ReplicaSet.", "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", } @@ -306,7 +320,7 @@ func (ScaleStatus) SwaggerDoc() map[string]string { } var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. 
Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", "spec": "Spec defines the desired identities of pods in this set.", "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", } @@ -315,6 +329,19 @@ func (StatefulSet) SwaggerDoc() map[string]string { return map_StatefulSet } +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + var map_StatefulSetList = map[string]string{ "": "StatefulSetList is a collection of StatefulSets.", } @@ -326,7 +353,7 @@ func (StatefulSetList) SwaggerDoc() map[string]string { var map_StatefulSetSpec = map[string]string{ "": "A StatefulSetSpec is the specification of a StatefulSet.", "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. 
Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", @@ -349,6 +376,7 @@ var map_StatefulSetStatus = map[string]string{ "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", } func (StatefulSetStatus) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 124f84a9..d25c869b 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -23,141 +23,10 @@ package v1beta2 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevision).DeepCopyInto(out.(*ControllerRevision)) - return nil - }, InType: reflect.TypeOf(&ControllerRevision{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ControllerRevisionList).DeepCopyInto(out.(*ControllerRevisionList)) - return nil - }, InType: reflect.TypeOf(&ControllerRevisionList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: 
reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateStatefulSetStrategy).DeepCopyInto(out.(*RollingUpdateStatefulSetStrategy)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateStatefulSetStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSet).DeepCopyInto(out.(*StatefulSet)) - return nil - }, InType: reflect.TypeOf(&StatefulSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetList).DeepCopyInto(out.(*StatefulSetList)) - return nil - }, InType: reflect.TypeOf(&StatefulSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetSpec).DeepCopyInto(out.(*StatefulSetSpec)) - return nil - }, InType: reflect.TypeOf(&StatefulSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetStatus).DeepCopyInto(out.(*StatefulSetStatus)) - return nil - }, InType: reflect.TypeOf(&StatefulSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatefulSetUpdateStrategy).DeepCopyInto(out.(*StatefulSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&StatefulSetUpdateStrategy{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { *out = *in @@ -249,6 +118,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -331,6 +217,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -880,6 +773,23 @@ func (in *StatefulSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in @@ -978,6 +888,13 @@ func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 15b117a4..2d2ed2ee 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index 96f1e776..fb7888b6 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -96,3 +96,4 @@ message UserInfo { // +optional map extra = 4; } + diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go index 936237c2..2ca79a62 100644 --- a/vendor/k8s.io/api/authentication/v1/register.go +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index c1717c1c..243de759 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index 065cfbd9..e0de315d 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index 1d8431bf..300e5348 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -96,3 +96,4 @@ message UserInfo { // +optional map extra = 4; } + diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go index 7d89e5ef..ed23e50f 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &TokenReview{}, diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index 1b94ecec..aa8d2ef3 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -21,40 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReview).DeepCopyInto(out.(*TokenReview)) - return nil - }, InType: reflect.TypeOf(&TokenReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewSpec).DeepCopyInto(out.(*TokenReviewSpec)) - return nil - }, InType: reflect.TypeOf(&TokenReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TokenReviewStatus).DeepCopyInto(out.(*TokenReviewStatus)) - return nil - }, InType: reflect.TypeOf(&TokenReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserInfo).DeepCopyInto(out.(*UserInfo)) - return nil - }, InType: reflect.TypeOf(&UserInfo{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index b02e09ca..c06b798d 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index 0795a645..d34d105f 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -730,6 +730,14 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1015,6 +1023,7 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1205,6 +1214,7 @@ func (this *SubjectAccessReviewStatus) String() string { `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, `}`, }, "") return s @@ -3130,6 +3140,26 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } m.EvaluationError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3422,77 +3452,77 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto + // 1152 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, - 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0xab, 0x11, 0x8a, 0xd4, 0x96, 0x6a, + 0x14, 0xf7, 0xae, 0xed, 0xd4, 0x1e, 0x37, 0x24, 0x9d, 0x28, 0xcd, 0x36, 0x15, 0x76, 0xb4, 0x48, + 0x90, 0x8a, 0xb2, 0x4b, 0x4c, 0xdb, 0x44, 0x95, 0x2a, 0x14, 0x2b, 0x11, 0x8a, 0xd4, 0x96, 0x6a, 0xa2, 0x44, 0xa2, 0x08, 0xc4, 0x78, 0x3d, 0xb1, 0x97, 0xd8, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x38, - 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0x80, 0x90, 0x90, 0xb8, 0x71, 0xe0, 0x80, 0x72, - 0xec, 0xb1, 0x07, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, 0x9b, - 0x70, 0xa0, 0x97, 0xde, 0x76, 0xdf, 0xef, 0xf7, 0xfe, 0xcc, 0x7b, 0x6f, 0xde, 0x3c, 0xf0, 0xe0, - 0x70, 0x93, 0x59, 0xae, 0x6f, 0x1f, 0x86, 0x4d, 0x42, 0x3d, 0xc2, 0x09, 0xb3, 0xfb, 0xc4, 0x6b, - 0xf9, 0xd4, 0x56, 0x00, 0x0e, 0x5c, 0x1b, 0x87, 0xbc, 0xe3, 0x53, 0xf7, 0x1b, 0xcc, 0x5d, 0xdf, - 0xb3, 0xfb, 0xeb, 0x76, 0x9b, 0x78, 0x84, 0x62, 0x4e, 0x5a, 0x56, 0x40, 0x7d, 0xee, 0xc3, 0x1b, - 0x31, 0xd9, 0xc2, 0x81, 0x6b, 0x8d, 0x91, 0xad, 0xfe, 0xfa, 0xca, 0x7b, 0x6d, 0x97, 0x77, 0xc2, - 0xa6, 0xe5, 0xf8, 0x3d, 0xbb, 0xed, 0xb7, 0x7d, 0x5b, 0xea, 0x34, 0xc3, 0x03, 0xf9, 0x27, 0x7f, - 0xe4, 0x57, 0x6c, 0x6b, 0xe5, 0x76, 0xe2, 0xb8, 0x87, 0x9d, 0x8e, 0xeb, 0x11, 0x7a, 0x6c, 0x07, - 0x87, 0x6d, 0x21, 0x60, 0x76, 0x8f, 0x70, 0x9c, 0x11, 0xc1, 0x8a, 
0x3d, 0x49, 0x8b, 0x86, 0x1e, - 0x77, 0x7b, 0xe4, 0x9c, 0xc2, 0xdd, 0x97, 0x29, 0x30, 0xa7, 0x43, 0x7a, 0xf8, 0x9c, 0xde, 0x07, - 0x93, 0xf4, 0x42, 0xee, 0x76, 0x6d, 0xd7, 0xe3, 0x8c, 0xd3, 0xb3, 0x4a, 0xe6, 0x06, 0x00, 0xdb, - 0x5f, 0x73, 0x8a, 0xf7, 0x71, 0x37, 0x24, 0xb0, 0x06, 0x8a, 0x2e, 0x27, 0x3d, 0x66, 0x68, 0xab, - 0xf9, 0xb5, 0x72, 0xa3, 0x1c, 0x0d, 0x6a, 0xc5, 0x1d, 0x21, 0x40, 0xb1, 0xfc, 0x5e, 0xe9, 0xfb, - 0x1f, 0x6b, 0xb9, 0x67, 0x7f, 0xae, 0xe6, 0xcc, 0x5f, 0x74, 0x60, 0x3c, 0xf4, 0x1d, 0xdc, 0xdd, - 0x0d, 0x9b, 0x5f, 0x12, 0x87, 0x6f, 0x39, 0x0e, 0x61, 0x0c, 0x91, 0xbe, 0x4b, 0x8e, 0xe0, 0x17, - 0xa0, 0x24, 0xd2, 0xd1, 0xc2, 0x1c, 0x1b, 0xda, 0xaa, 0xb6, 0x56, 0xa9, 0xbf, 0x6f, 0x25, 0x85, - 0x18, 0x45, 0x67, 0x05, 0x87, 0x6d, 0x21, 0x60, 0x96, 0x60, 0x5b, 0xfd, 0x75, 0xeb, 0x63, 0x69, - 0xeb, 0x11, 0xe1, 0xb8, 0x01, 0x4f, 0x06, 0xb5, 0x5c, 0x34, 0xa8, 0x81, 0x44, 0x86, 0x46, 0x56, - 0xe1, 0x3e, 0x28, 0xb0, 0x80, 0x38, 0x86, 0x2e, 0xad, 0xdf, 0xb6, 0xa6, 0x94, 0xd9, 0xca, 0x88, - 0x70, 0x37, 0x20, 0x4e, 0xe3, 0x8a, 0xf2, 0x50, 0x10, 0x7f, 0x48, 0xda, 0x83, 0x9f, 0x83, 0x19, - 0xc6, 0x31, 0x0f, 0x99, 0x91, 0x97, 0x96, 0xef, 0x5e, 0xda, 0xb2, 0xd4, 0x6e, 0xbc, 0xa1, 0x6c, - 0xcf, 0xc4, 0xff, 0x48, 0x59, 0x35, 0x3f, 0x05, 0x4b, 0x8f, 0x7d, 0x0f, 0x11, 0xe6, 0x87, 0xd4, - 0x21, 0x5b, 0x9c, 0x53, 0xb7, 0x19, 0x72, 0xc2, 0xe0, 0x2a, 0x28, 0x04, 0x98, 0x77, 0x64, 0xba, - 0xca, 0x49, 0x68, 0x4f, 0x30, 0xef, 0x20, 0x89, 0x08, 0x46, 0x9f, 0xd0, 0xa6, 0x3c, 0x72, 0x8a, - 0xb1, 0x4f, 0x68, 0x13, 0x49, 0xc4, 0xfc, 0x0a, 0xcc, 0xa7, 0x8c, 0xa3, 0xb0, 0x2b, 0x2b, 0x2a, - 0xa0, 0xb1, 0x8a, 0x0a, 0x0d, 0x86, 0x62, 0x39, 0xbc, 0x0f, 0xe6, 0xbd, 0x44, 0x67, 0x0f, 0x3d, - 0x64, 0x86, 0x2e, 0xa9, 0x8b, 0xd1, 0xa0, 0x96, 0x36, 0x27, 0x20, 0x74, 0x96, 0x6b, 0xfe, 0xa6, - 0x03, 0x98, 0x71, 0x1a, 0x1b, 0x94, 0x3d, 0xdc, 0x23, 0x2c, 0xc0, 0x0e, 0x51, 0x47, 0xba, 0xaa, - 0x02, 0x2e, 0x3f, 0x1e, 0x02, 0x28, 0xe1, 0xbc, 0xfc, 0x70, 0xf0, 0x2d, 0x50, 0x6c, 0x53, 0x3f, - 0x0c, 0x64, 0x61, 0xca, 0x8d, 0x39, 0x45, 0x29, 0x7e, 0x24, 0x84, 0x28, 0xc6, 0xe0, 0x4d, 0x30, - 0xdb, 0x27, 0x94, 0xb9, 0xbe, 0x67, 0x14, 0x24, 0x6d, 0x5e, 0xd1, 0x66, 0xf7, 0x63, 0x31, 0x1a, - 0xe2, 0xf0, 0x16, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x05, 0xc5, 0x2d, 0x8d, 0x32, 0x38, - 0x62, 0xc0, 0x3b, 0xa0, 0xc2, 0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, 0xca, 0x6e, - 0x02, 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0x98, 0x1d, 0x3f, 0x96, 0x48, 0x01, 0x92, 0x88, - 0xf9, 0xbb, 0x06, 0xae, 0x5c, 0xae, 0x62, 0xef, 0x82, 0x32, 0x0e, 0x5c, 0x79, 0xec, 0x61, 0xad, - 0xe6, 0x44, 0x5e, 0xb7, 0x9e, 0xec, 0xc4, 0x42, 0x94, 0xe0, 0x82, 0x3c, 0x0c, 0x46, 0xb4, 0xf4, - 0x88, 0x3c, 0x74, 0xc9, 0x50, 0x82, 0xc3, 0x0d, 0x30, 0x37, 0xfc, 0x91, 0x45, 0x32, 0x0a, 0x52, - 0xe1, 0x6a, 0x34, 0xa8, 0xcd, 0xa1, 0x34, 0x80, 0xc6, 0x79, 0xe6, 0xaf, 0x3a, 0x58, 0xde, 0x25, - 0xdd, 0x83, 0x57, 0x33, 0x0b, 0x9e, 0x8e, 0xcd, 0x82, 0xcd, 0xe9, 0x37, 0x36, 0x3b, 0xca, 0x57, - 0x36, 0x0f, 0x7e, 0xd0, 0xc1, 0x8d, 0x29, 0x31, 0xc1, 0x23, 0x00, 0xe9, 0xb9, 0xeb, 0xa5, 0xf2, - 0x68, 0x4f, 0x8d, 0xe5, 0xfc, 0xad, 0x6c, 0x5c, 0x8b, 0x06, 0xb5, 0x8c, 0xdb, 0x8a, 0x32, 0x5c, - 0xc0, 0x6f, 0x35, 0xb0, 0xe4, 0x65, 0x4d, 0x2a, 0x95, 0xe6, 0xfa, 0x54, 0xe7, 0x99, 0x33, 0xae, - 0x71, 0x3d, 0x1a, 0xd4, 0xb2, 0xc7, 0x1f, 0xca, 0xf6, 0x25, 0x5e, 0x99, 0x6b, 0xa9, 0xf4, 0x88, - 0x0b, 0xf2, 0xff, 0xf5, 0xd5, 0x27, 0x63, 0x7d, 0xb5, 0x71, 0xd1, 0xbe, 0x4a, 0x05, 0x39, 0xb1, - 0xad, 0x3e, 0x3b, 0xd3, 0x56, 0x77, 0x2e, 0xd2, 0x56, 0x69, 0xc3, 0xd3, 0xbb, 0xea, 0x11, 
0x58, - 0x99, 0x1c, 0xd0, 0xa5, 0x87, 0xb3, 0xf9, 0x93, 0x0e, 0x16, 0x5f, 0x3f, 0xf3, 0x97, 0xb9, 0xd6, - 0x7f, 0x14, 0xc0, 0xf2, 0xeb, 0x2b, 0x3d, 0x69, 0xd1, 0x09, 0x19, 0xa1, 0xea, 0x19, 0x1f, 0x15, - 0x67, 0x8f, 0x11, 0x8a, 0x24, 0x02, 0x4d, 0x30, 0xd3, 0x8e, 0x5f, 0xb7, 0xf8, 0xfd, 0x01, 0x22, - 0xc1, 0xea, 0x69, 0x53, 0x08, 0x6c, 0x81, 0x22, 0x11, 0x7b, 0xab, 0x51, 0x5c, 0xcd, 0xaf, 0x55, - 0xea, 0x1f, 0xfe, 0x97, 0xce, 0xb0, 0xe4, 0xe6, 0xbb, 0xed, 0x71, 0x7a, 0x9c, 0xac, 0x13, 0x52, - 0x86, 0x62, 0xe3, 0xf0, 0x4d, 0x90, 0x0f, 0xdd, 0x96, 0x7a, 0xed, 0x2b, 0x8a, 0x92, 0xdf, 0xdb, - 0x79, 0x80, 0x84, 0x7c, 0x05, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0x0b, 0x20, 0x7f, 0x48, 0x8e, 0xe3, - 0x0b, 0x85, 0xc4, 0x27, 0xbc, 0x0f, 0x8a, 0x7d, 0xb1, 0x57, 0xab, 0xfc, 0xbe, 0x33, 0x35, 0xc8, - 0x64, 0x0d, 0x47, 0xb1, 0xd6, 0x3d, 0x7d, 0x53, 0x33, 0x7f, 0xd6, 0xc0, 0xf5, 0x89, 0xed, 0x27, - 0xd6, 0x1d, 0xdc, 0xed, 0xfa, 0x47, 0xa4, 0x25, 0xdd, 0x96, 0x92, 0x75, 0x67, 0x2b, 0x16, 0xa3, - 0x21, 0x0e, 0xdf, 0x06, 0x33, 0x94, 0x60, 0xe6, 0x7b, 0x6a, 0xc5, 0x1a, 0x75, 0x2e, 0x92, 0x52, - 0xa4, 0x50, 0xb8, 0x05, 0xe6, 0x89, 0x70, 0x2f, 0xe3, 0xda, 0xa6, 0xd4, 0x1f, 0x56, 0x6a, 0x59, - 0x29, 0xcc, 0x6f, 0x8f, 0xc3, 0xe8, 0x2c, 0xdf, 0xfc, 0x47, 0x07, 0xc6, 0xa4, 0x91, 0x05, 0x0f, - 0x92, 0x1d, 0x43, 0x82, 0x72, 0xcd, 0xa9, 0xd4, 0x6f, 0x5e, 0xa8, 0xf1, 0x85, 0x46, 0x63, 0x49, - 0x05, 0x32, 0x97, 0x96, 0xa6, 0x56, 0x12, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, 0x77, 0xe1, 0x78, - 0x59, 0xaa, 0xd4, 0x6f, 0x5d, 0xb4, 0xcd, 0xa5, 0x37, 0x43, 0x79, 0x5b, 0x38, 0x03, 0x30, 0x74, - 0xce, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, 0xb6, 0x52, 0x32, - 0xdf, 0x76, 0x46, 0x08, 0x4a, 0xb1, 0xb2, 0xf2, 0x5d, 0xb8, 0x5c, 0xbe, 0x1b, 0x6b, 0x27, 0xa7, - 0xd5, 0xdc, 0xf3, 0xd3, 0x6a, 0xee, 0xc5, 0x69, 0x35, 0xf7, 0x2c, 0xaa, 0x6a, 0x27, 0x51, 0x55, - 0x7b, 0x1e, 0x55, 0xb5, 0x17, 0x51, 0x55, 0xfb, 0x2b, 0xaa, 0x6a, 0xdf, 0xfd, 0x5d, 0xcd, 0x3d, - 0xd5, 0xfb, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xd9, 0x3f, 0xd9, 0x21, 0x54, 0x0f, 0x00, - 0x00, + 0x55, 0xe2, 0x0b, 0x70, 0xe4, 0xc0, 0x81, 0x6f, 0xc0, 0x05, 0x89, 0x1b, 0x07, 0x0e, 0x28, 0xc7, + 0x1e, 0x8b, 0x84, 0x2c, 0xb2, 0x9c, 0xf9, 0x0e, 0x68, 0x66, 0xc7, 0xde, 0x75, 0xb2, 0x76, 0x13, + 0x0e, 0xf4, 0xd2, 0xdb, 0xee, 0xfb, 0xfd, 0xde, 0x9f, 0x79, 0x7f, 0x66, 0x1e, 0xd8, 0x3a, 0xdc, + 0x60, 0x96, 0xeb, 0xdb, 0x87, 0x61, 0x93, 0x50, 0x8f, 0x70, 0xc2, 0xec, 0x3e, 0xf1, 0x5a, 0x3e, + 0xb5, 0x15, 0x80, 0x03, 0xd7, 0xc6, 0x21, 0xef, 0xf8, 0xd4, 0xfd, 0x06, 0x73, 0xd7, 0xf7, 0xec, + 0xfe, 0x9a, 0xdd, 0x26, 0x1e, 0xa1, 0x98, 0x93, 0x96, 0x15, 0x50, 0x9f, 0xfb, 0xf0, 0x66, 0x4c, + 0xb6, 0x70, 0xe0, 0x5a, 0x63, 0x64, 0xab, 0xbf, 0xb6, 0xfc, 0x5e, 0xdb, 0xe5, 0x9d, 0xb0, 0x69, + 0x39, 0x7e, 0xcf, 0x6e, 0xfb, 0x6d, 0xdf, 0x96, 0x3a, 0xcd, 0xf0, 0x40, 0xfe, 0xc9, 0x1f, 0xf9, + 0x15, 0xdb, 0x5a, 0xbe, 0x93, 0x38, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x1e, 0xdb, 0xc1, 0x61, + 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x44, 0xb0, 0x6c, 0x4f, 0xd2, 0xa2, 0xa1, 0xc7, 0xdd, + 0x1e, 0x39, 0xa7, 0x70, 0xef, 0x65, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xa7, 0xf7, 0xc1, 0x24, + 0xbd, 0x90, 0xbb, 0x5d, 0xdb, 0xf5, 0x38, 0xe3, 0xf4, 0xac, 0x92, 0xb9, 0x0e, 0xc0, 0xf6, 0xd7, + 0x9c, 0xe2, 0x7d, 0xdc, 0x0d, 0x09, 0xac, 0x81, 0xa2, 0xcb, 0x49, 0x8f, 0x19, 0xda, 0x4a, 0x7e, + 0xb5, 0xdc, 0x28, 0x47, 0x83, 0x5a, 0x71, 0x47, 0x08, 0x50, 0x2c, 0xbf, 0x5f, 0xfa, 0xfe, 0xc7, + 0x5a, 0xee, 0xd9, 0x9f, 0x2b, 0x39, 0xf3, 0x67, 0x1d, 0x18, 0x0f, 0x7d, 0x07, 0x77, 0x77, 0xc3, + 0xe6, 
0x97, 0xc4, 0xe1, 0x9b, 0x8e, 0x43, 0x18, 0x43, 0xa4, 0xef, 0x92, 0x23, 0xf8, 0x05, 0x28, + 0x89, 0x74, 0xb4, 0x30, 0xc7, 0x86, 0xb6, 0xa2, 0xad, 0x56, 0xea, 0xef, 0x5b, 0x49, 0x21, 0x46, + 0xd1, 0x59, 0xc1, 0x61, 0x5b, 0x08, 0x98, 0x25, 0xd8, 0x56, 0x7f, 0xcd, 0xfa, 0x58, 0xda, 0x7a, + 0x44, 0x38, 0x6e, 0xc0, 0x93, 0x41, 0x2d, 0x17, 0x0d, 0x6a, 0x20, 0x91, 0xa1, 0x91, 0x55, 0xb8, + 0x0f, 0x0a, 0x2c, 0x20, 0x8e, 0xa1, 0x4b, 0xeb, 0x77, 0xac, 0x29, 0x65, 0xb6, 0x32, 0x22, 0xdc, + 0x0d, 0x88, 0xd3, 0xb8, 0xaa, 0x3c, 0x14, 0xc4, 0x1f, 0x92, 0xf6, 0xe0, 0xe7, 0x60, 0x86, 0x71, + 0xcc, 0x43, 0x66, 0xe4, 0xa5, 0xe5, 0x7b, 0x97, 0xb6, 0x2c, 0xb5, 0x1b, 0x6f, 0x28, 0xdb, 0x33, + 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc1, 0xe2, 0x63, 0xdf, 0x43, 0x84, 0xf9, 0x21, 0x75, 0xc8, + 0x26, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x02, 0x0a, 0x01, 0xe6, 0x1d, 0x99, 0xae, 0x72, + 0x12, 0xda, 0x13, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, 0x29, 0x8f, 0x9c, 0x62, 0xec, + 0x13, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x02, 0x73, 0x29, 0xe3, 0x28, 0xec, 0xca, 0x8a, 0x0a, 0x68, + 0xac, 0xa2, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x80, 0x39, 0x2f, 0xd1, 0xd9, 0x43, 0x0f, 0x99, + 0xa1, 0x4b, 0xea, 0x42, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, 0xe5, 0x9a, 0xbf, 0xea, 0x00, + 0x66, 0x9c, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, 0xd4, 0x91, 0xae, 0xa9, 0x80, + 0xcb, 0x8f, 0x87, 0x00, 0x4a, 0x38, 0x2f, 0x3f, 0x1c, 0x7c, 0x0b, 0x14, 0xdb, 0xd4, 0x0f, 0x03, + 0x59, 0x98, 0x72, 0x63, 0x56, 0x51, 0x8a, 0x1f, 0x09, 0x21, 0x8a, 0x31, 0x78, 0x0b, 0x5c, 0xe9, + 0x13, 0xca, 0x5c, 0xdf, 0x33, 0x0a, 0x92, 0x36, 0xa7, 0x68, 0x57, 0xf6, 0x63, 0x31, 0x1a, 0xe2, + 0xf0, 0x36, 0x28, 0x51, 0x15, 0xb8, 0x51, 0x94, 0xdc, 0x79, 0xc5, 0x2d, 0x8d, 0x32, 0x38, 0x62, + 0xc0, 0xbb, 0xa0, 0xc2, 0xc2, 0xe6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x28, 0x85, 0xca, 0x6e, 0x02, + 0xa1, 0x34, 0x4f, 0x1c, 0x4b, 0x9c, 0xd1, 0xb8, 0x32, 0x7e, 0x2c, 0x91, 0x02, 0x24, 0x11, 0xf3, + 0x37, 0x0d, 0x5c, 0xbd, 0x5c, 0xc5, 0xde, 0x05, 0x65, 0x1c, 0xb8, 0xf2, 0xd8, 0xc3, 0x5a, 0xcd, + 0x8a, 0xbc, 0x6e, 0x3e, 0xd9, 0x89, 0x85, 0x28, 0xc1, 0x05, 0x79, 0x18, 0x8c, 0x68, 0xe9, 0x11, + 0x79, 0xe8, 0x92, 0xa1, 0x04, 0x87, 0xeb, 0x60, 0x76, 0xf8, 0x23, 0x8b, 0x64, 0x14, 0xa4, 0xc2, + 0xb5, 0x68, 0x50, 0x9b, 0x45, 0x69, 0x00, 0x8d, 0xf3, 0xcc, 0x5f, 0x74, 0xb0, 0xb4, 0x4b, 0xba, + 0x07, 0xaf, 0xe6, 0x2e, 0x78, 0x3a, 0x76, 0x17, 0x6c, 0x4c, 0x9f, 0xd8, 0xec, 0x28, 0x5f, 0xd9, + 0x7d, 0xf0, 0x83, 0x0e, 0x6e, 0x4e, 0x89, 0x09, 0x1e, 0x01, 0x48, 0xcf, 0x8d, 0x97, 0xca, 0xa3, + 0x3d, 0x35, 0x96, 0xf3, 0x53, 0xd9, 0xb8, 0x1e, 0x0d, 0x6a, 0x19, 0xd3, 0x8a, 0x32, 0x5c, 0xc0, + 0x6f, 0x35, 0xb0, 0xe8, 0x65, 0xdd, 0x54, 0x2a, 0xcd, 0xf5, 0xa9, 0xce, 0x33, 0xef, 0xb8, 0xc6, + 0x8d, 0x68, 0x50, 0xcb, 0xbe, 0xfe, 0x50, 0xb6, 0x2f, 0xf1, 0xca, 0x5c, 0x4f, 0xa5, 0x47, 0x0c, + 0xc8, 0xff, 0xd7, 0x57, 0x9f, 0x8c, 0xf5, 0xd5, 0xfa, 0x45, 0xfb, 0x2a, 0x15, 0xe4, 0xc4, 0xb6, + 0xfa, 0xec, 0x4c, 0x5b, 0xdd, 0xbd, 0x48, 0x5b, 0xa5, 0x0d, 0x4f, 0xef, 0xaa, 0x47, 0x60, 0x79, + 0x72, 0x40, 0x97, 0xbe, 0x9c, 0xcd, 0x9f, 0x74, 0xb0, 0xf0, 0xfa, 0x99, 0xbf, 0xcc, 0x58, 0xff, + 0x5e, 0x00, 0x4b, 0xaf, 0x47, 0x7a, 0xd2, 0xa2, 0x13, 0x32, 0x42, 0xd5, 0x33, 0x3e, 0x2a, 0xce, + 0x1e, 0x23, 0x14, 0x49, 0x04, 0x9a, 0x60, 0xa6, 0x1d, 0xbf, 0x6e, 0xf1, 0xfb, 0x03, 0x44, 0x82, + 0xd5, 0xd3, 0xa6, 0x10, 0xd8, 0x02, 0x45, 0x22, 0xf6, 0x56, 0xa3, 0xb8, 0x92, 0x5f, 0xad, 0xd4, + 0x3f, 0xfc, 0x2f, 0x9d, 0x61, 0xc9, 0xcd, 0x77, 0xdb, 0xe3, 0xf4, 0x38, 0x59, 0x27, 0xa4, 0x0c, + 0xc5, 0xc6, 0xe1, 0x9b, 0x20, 
0x1f, 0xba, 0x2d, 0xf5, 0xda, 0x57, 0x14, 0x25, 0xbf, 0xb7, 0xb3, + 0x85, 0x84, 0x7c, 0x19, 0xab, 0xe5, 0x59, 0x9a, 0x80, 0xf3, 0x20, 0x7f, 0x48, 0x8e, 0xe3, 0x81, + 0x42, 0xe2, 0x13, 0x3e, 0x00, 0xc5, 0xbe, 0xd8, 0xab, 0x55, 0x7e, 0xdf, 0x99, 0x1a, 0x64, 0xb2, + 0x86, 0xa3, 0x58, 0xeb, 0xbe, 0xbe, 0xa1, 0x99, 0x7f, 0x68, 0xe0, 0xc6, 0xc4, 0xf6, 0x13, 0xeb, + 0x0e, 0xee, 0x76, 0xfd, 0x23, 0xd2, 0x92, 0x6e, 0x4b, 0xc9, 0xba, 0xb3, 0x19, 0x8b, 0xd1, 0x10, + 0x87, 0x6f, 0x83, 0x19, 0x4a, 0x30, 0xf3, 0x3d, 0xb5, 0x62, 0x8d, 0x3a, 0x17, 0x49, 0x29, 0x52, + 0x28, 0xdc, 0x04, 0x73, 0x44, 0xb8, 0x97, 0x71, 0x6d, 0x53, 0xea, 0x0f, 0x2b, 0xb5, 0xa4, 0x14, + 0xe6, 0xb6, 0xc7, 0x61, 0x74, 0x96, 0x2f, 0x5c, 0xb5, 0x88, 0xe7, 0x92, 0x96, 0xdc, 0xc1, 0x4a, + 0x89, 0xab, 0x2d, 0x29, 0x45, 0x0a, 0x35, 0xff, 0xd1, 0x81, 0x31, 0xe9, 0x6a, 0x83, 0x07, 0xc9, + 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2e, 0x34, 0x20, 0x42, 0xa3, 0xb1, 0xa8, 0xdc, + 0xce, 0xa6, 0xa5, 0xa9, 0xd5, 0x45, 0xfe, 0x42, 0x0a, 0xe6, 0xbd, 0xf1, 0x9d, 0x39, 0x5e, 0xaa, + 0x2a, 0xf5, 0xdb, 0x17, 0x1d, 0x07, 0xe9, 0xcd, 0x50, 0xde, 0xe6, 0xcf, 0x00, 0x0c, 0x9d, 0xb3, + 0x0f, 0xeb, 0x00, 0xb8, 0x9e, 0xe3, 0xf7, 0x82, 0x2e, 0xe1, 0x44, 0xa6, 0xb7, 0x94, 0xdc, 0x83, + 0x3b, 0x23, 0x04, 0xa5, 0x58, 0x59, 0x75, 0x29, 0x5c, 0xae, 0x2e, 0x8d, 0xd5, 0x93, 0xd3, 0x6a, + 0xee, 0xf9, 0x69, 0x35, 0xf7, 0xe2, 0xb4, 0x9a, 0x7b, 0x16, 0x55, 0xb5, 0x93, 0xa8, 0xaa, 0x3d, + 0x8f, 0xaa, 0xda, 0x8b, 0xa8, 0xaa, 0xfd, 0x15, 0x55, 0xb5, 0xef, 0xfe, 0xae, 0xe6, 0x9e, 0xea, + 0xfd, 0xb5, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x0e, 0xab, 0x82, 0x7c, 0x0f, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto index e3ce0274..28c8d660 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -226,9 +226,16 @@ message SubjectAccessReviewSpec { // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. // +optional optional string reason = 2; @@ -263,3 +270,4 @@ message SubjectRulesReviewStatus { // +optional optional string evaluationError = 4; } + diff --git a/vendor/k8s.io/api/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go index d716eaa9..59311981 100644 --- a/vendor/k8s.io/api/authorization/v1/register.go +++ b/vendor/k8s.io/api/authorization/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &SelfSubjectRulesReview{}, diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index 23b5ae70..86b05c54 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -169,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go index 1e3f780d..85503660 100644 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -148,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go index 916974ff..a415b2b1 100644 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 6809b811..ea4f802e 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index e1f49d46..9b1ad299 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -730,6 +730,14 @@ func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ return i, nil } @@ -1015,6 +1023,7 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.EvaluationError) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } @@ -1205,6 +1214,7 @@ func (this *SubjectAccessReviewStatus) String() string { `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, `}`, }, "") return s @@ -3130,6 +3140,26 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } m.EvaluationError = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3422,77 +3452,78 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 1139 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xf6, 0xfa, 0x47, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, - 0x05, 0xd1, 0xee, 0x92, 0x50, 0x48, 0x09, 0xf4, 0x10, 0xab, 0x11, 0x8a, 0xd4, 0x96, 0x6a, 0xa2, - 0xe4, 0x40, 0x25, 0x60, 0x76, 0x33, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x10, 0x87, - 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0x3b, 0x47, 0x2e, 0x48, 0x70, 0xca, 0xb1, 0xc7, 0x1c, 0x90, - 0x45, 0x96, 0x3f, 0x82, 0x2b, 0x9a, 0xd9, 0xb1, 0x77, 0x1d, 0xaf, 0xe3, 0x24, 0x87, 0xf6, 0xd2, - 0xdb, 0xce, 0xfb, 0xde, 0xf7, 0xde, 0x9b, 0x37, 0xef, 0xbd, 0x7d, 0x60, 0xa7, 0x7d, 0x9f, 0x19, - 0x8e, 0x67, 0xb6, 0x03, 0x8b, 0x50, 0x97, 0x70, 0xc2, 0xcc, 0x1e, 0x71, 0x0f, 0x3c, 0x6a, 0x2a, - 0x00, 0xfb, 0x8e, 0x89, 0x03, 0xde, 0xf2, 0xa8, 0xf3, 0x3d, 0xe6, 0x8e, 0xe7, 0x9a, 0xbd, 0x35, - 0x8b, 0x70, 0xbc, 0x66, 0x36, 0x89, 0x4b, 0x28, 0xe6, 0xe4, 0xc0, 0xf0, 0xa9, 0xc7, 0x3d, 0x58, - 0x8b, 0x18, 0x06, 0xf6, 0x1d, 0x63, 0x84, 0x61, 0x28, 0xc6, 0xca, 0xdd, 0xa6, 0xc3, 0x5b, 0x81, - 0x65, 0xd8, 0x5e, 0xd7, 0x6c, 
0x7a, 0x4d, 0xcf, 0x94, 0x44, 0x2b, 0x38, 0x94, 0x27, 0x79, 0x90, - 0x5f, 0x91, 0xc1, 0x95, 0x7b, 0x71, 0x08, 0x5d, 0x6c, 0xb7, 0x1c, 0x97, 0xd0, 0x63, 0xd3, 0x6f, - 0x37, 0x85, 0x80, 0x99, 0x5d, 0xc2, 0xb1, 0xd9, 0x1b, 0x0b, 0x63, 0xc5, 0x9c, 0xc4, 0xa2, 0x81, - 0xcb, 0x9d, 0x2e, 0x19, 0x23, 0x7c, 0x34, 0x8d, 0xc0, 0xec, 0x16, 0xe9, 0xe2, 0x31, 0xde, 0x07, - 0x93, 0x78, 0x01, 0x77, 0x3a, 0xa6, 0xe3, 0x72, 0xc6, 0xe9, 0x79, 0x52, 0x7d, 0x03, 0x80, 0xed, - 0xef, 0x38, 0xc5, 0xfb, 0xb8, 0x13, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x2e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0xfd, 0x6a, 0x61, 0x47, 0x08, 0x50, 0x24, 0xdf, 0x2c, 0xfe, 0xfc, - 0x4b, 0x35, 0xf3, 0xfc, 0xef, 0x5a, 0xa6, 0xfe, 0x47, 0x16, 0xe8, 0x8f, 0x3c, 0x1b, 0x77, 0x76, - 0x03, 0xeb, 0x1b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x73, 0xc8, 0x11, 0xfc, 0x1a, - 0x14, 0x45, 0x3a, 0x0e, 0x30, 0xc7, 0xba, 0x56, 0xd3, 0x56, 0xcb, 0xeb, 0xef, 0x1b, 0xf1, 0x6b, - 0x0c, 0xa3, 0x33, 0xfc, 0x76, 0x53, 0x08, 0x98, 0x21, 0xb4, 0x8d, 0xde, 0x9a, 0xf1, 0xb9, 0xb4, - 0xf5, 0x98, 0x70, 0xdc, 0x80, 0x27, 0xfd, 0x6a, 0x26, 0xec, 0x57, 0x41, 0x2c, 0x43, 0x43, 0xab, - 0xf0, 0x19, 0xc8, 0x33, 0x9f, 0xd8, 0x7a, 0x56, 0x5a, 0xff, 0xd8, 0x98, 0xf6, 0xd6, 0x46, 0x4a, - 0x98, 0xbb, 0x3e, 0xb1, 0x1b, 0x37, 0x94, 0x9b, 0xbc, 0x38, 0x21, 0x69, 0x14, 0xda, 0x60, 0x86, - 0x71, 0xcc, 0x03, 0xa6, 0xe7, 0xa4, 0xf9, 0x4f, 0xae, 0x67, 0x5e, 0x9a, 0x68, 0xbc, 0xa1, 0x1c, - 0xcc, 0x44, 0x67, 0xa4, 0x4c, 0xd7, 0x9f, 0x81, 0xa5, 0x27, 0x9e, 0x8b, 0x08, 0xf3, 0x02, 0x6a, - 0x93, 0x2d, 0xce, 0xa9, 0x63, 0x05, 0x9c, 0x30, 0x58, 0x03, 0x79, 0x1f, 0xf3, 0x96, 0x4c, 0x5c, - 0x29, 0x8e, 0xef, 0x29, 0xe6, 0x2d, 0x24, 0x11, 0xa1, 0xd1, 0x23, 0xd4, 0x92, 0x97, 0x4f, 0x68, - 0xec, 0x13, 0x6a, 0x21, 0x89, 0xd4, 0xbf, 0x05, 0xf3, 0x09, 0xe3, 0x28, 0xe8, 0xc8, 0xb7, 0x15, - 0xd0, 0xc8, 0xdb, 0x0a, 0x06, 0x43, 0x91, 0x1c, 0x3e, 0x00, 0xf3, 0x6e, 0xcc, 0xd9, 0x43, 0x8f, - 0x98, 0x9e, 0x95, 0xaa, 0x8b, 0x61, 0xbf, 0x9a, 0x34, 0x27, 0x20, 0x74, 0x5e, 0x57, 0x14, 0x04, - 0x4c, 0xb9, 0x8d, 0x09, 0x4a, 0x2e, 0xee, 0x12, 0xe6, 0x63, 0x9b, 0xa8, 0x2b, 0xdd, 0x54, 0x01, - 0x97, 0x9e, 0x0c, 0x00, 0x14, 0xeb, 0x4c, 0xbf, 0x1c, 0x7c, 0x1b, 0x14, 0x9a, 0xd4, 0x0b, 0x7c, - 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0x52, 0x29, 0x7c, 0x26, 0x84, 0x28, 0xc2, 0xe0, 0xbb, 0x60, 0xb6, - 0x47, 0x28, 0x73, 0x3c, 0x57, 0xcf, 0x4b, 0xb5, 0x79, 0xa5, 0x36, 0xbb, 0x1f, 0x89, 0xd1, 0x00, - 0x87, 0x77, 0x40, 0x91, 0xaa, 0xc0, 0xf5, 0x82, 0xd4, 0x5d, 0x50, 0xba, 0xc5, 0x61, 0x06, 0x87, - 0x1a, 0xf0, 0x43, 0x50, 0x66, 0x81, 0x35, 0x24, 0xcc, 0x48, 0xc2, 0xa2, 0x22, 0x94, 0x77, 0x63, - 0x08, 0x25, 0xf5, 0xc4, 0xb5, 0xc4, 0x1d, 0xf5, 0xd9, 0xd1, 0x6b, 0x89, 0x14, 0x20, 0x89, 0xd4, - 0xff, 0xd2, 0xc0, 0x8d, 0xab, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x5e, 0x7b, 0xf0, 0x56, - 0x73, 0x22, 0xaf, 0x5b, 0x4f, 0x77, 0x22, 0x21, 0x8a, 0x71, 0xa1, 0x3c, 0x08, 0x46, 0xd4, 0xf5, - 0x50, 0x79, 0xe0, 0x92, 0xa1, 0x18, 0x87, 0x1b, 0x60, 0x6e, 0x70, 0x90, 0x8f, 0xa4, 0xe7, 0x25, - 0xe1, 0x66, 0xd8, 0xaf, 0xce, 0xa1, 0x24, 0x80, 0x46, 0xf5, 0xea, 0x7f, 0x66, 0xc1, 0xf2, 0x2e, - 0xe9, 0x1c, 0xbe, 0x9a, 0xa9, 0xf0, 0xd5, 0xc8, 0x54, 0x78, 0x70, 0x89, 0xb6, 0x4d, 0x0f, 0xf5, - 0xd5, 0x4e, 0x86, 0x5f, 0xb3, 0xe0, 0xcd, 0x0b, 0x02, 0x83, 0x3f, 0x00, 0x48, 0xc7, 0x1a, 0x4d, - 0x65, 0xf4, 0xde, 0xf4, 0x80, 0xc6, 0x9b, 0xb4, 0x71, 0x2b, 0xec, 0x57, 0x53, 0x9a, 0x17, 0xa5, - 0xf8, 0x81, 0x3f, 0x6a, 0x60, 0xc9, 0x4d, 0x1b, 0x5c, 0x2a, 0xeb, 0x1b, 0xd3, 0x23, 0x48, 0x9d, - 0x7b, 0x8d, 0xdb, 0x61, 0xbf, 0x9a, 0x3e, 0x12, 0x51, 
0xba, 0x43, 0x31, 0x72, 0x6e, 0x25, 0x12, - 0x25, 0x9a, 0xe6, 0xe5, 0xd5, 0xda, 0x97, 0x23, 0xb5, 0xf6, 0xe9, 0x95, 0x6a, 0x2d, 0x11, 0xe9, - 0xc4, 0x52, 0xb3, 0xce, 0x95, 0xda, 0xe6, 0xa5, 0x4b, 0x2d, 0x69, 0xfd, 0xe2, 0x4a, 0x7b, 0x0c, - 0x56, 0x26, 0x47, 0x75, 0xe5, 0xd1, 0x5d, 0xff, 0x3d, 0x0b, 0x16, 0x5f, 0xaf, 0x03, 0xd7, 0x6b, - 0xfa, 0xd3, 0x3c, 0x58, 0x7e, 0xdd, 0xf0, 0x17, 0x37, 0xbc, 0xf8, 0x89, 0x06, 0x8c, 0x50, 0xf5, - 0xe3, 0x1f, 0xbe, 0xd5, 0x1e, 0x23, 0x14, 0x49, 0x04, 0xd6, 0x06, 0xbb, 0x41, 0xf4, 0xc3, 0x02, - 0x22, 0xd3, 0xea, 0x5f, 0xa8, 0x16, 0x03, 0x07, 0x14, 0x88, 0xd8, 0x78, 0xf5, 0x42, 0x2d, 0xb7, - 0x5a, 0x5e, 0x7f, 0x78, 0xed, 0x5a, 0x31, 0xe4, 0xe2, 0xbc, 0xed, 0x72, 0x7a, 0x1c, 0xef, 0x20, - 0x52, 0x86, 0x22, 0x0f, 0xf0, 0x2d, 0x90, 0x0b, 0x9c, 0x03, 0xb5, 0x22, 0x94, 0x95, 0x4a, 0x6e, - 0x6f, 0xe7, 0x21, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, 0xda, 0xe4, - 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0x7a, 0x62, 0x2d, 0x57, 0x79, 0xbe, 0x33, 0x3d, - 0xd2, 0x78, 0x95, 0x47, 0x11, 0x75, 0x33, 0x7b, 0x5f, 0xab, 0xff, 0xa6, 0x81, 0xdb, 0x13, 0x0b, - 0x52, 0x2c, 0x4a, 0xb8, 0xd3, 0xf1, 0x8e, 0xc8, 0x81, 0xf4, 0x5d, 0x8c, 0x17, 0xa5, 0xad, 0x48, - 0x8c, 0x06, 0x38, 0x7c, 0x07, 0xcc, 0x50, 0x82, 0x99, 0xe7, 0xaa, 0xe5, 0x6c, 0x58, 0xcb, 0x48, - 0x4a, 0x91, 0x42, 0xe1, 0x16, 0x98, 0x27, 0xc2, 0xbd, 0x0c, 0x6e, 0x9b, 0x52, 0x6f, 0xf0, 0x62, - 0xcb, 0x8a, 0x30, 0xbf, 0x3d, 0x0a, 0xa3, 0xf3, 0xfa, 0xf5, 0xff, 0xb2, 0x40, 0x9f, 0x34, 0xce, - 0x60, 0x3b, 0xde, 0x4e, 0x24, 0x28, 0x17, 0xa4, 0xf2, 0xba, 0x71, 0xf9, 0x56, 0x10, 0xb4, 0xc6, - 0x92, 0x8a, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x34, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, 0xba, 0x4a, - 0x47, 0xbb, 0x56, 0x79, 0x7d, 0xed, 0x4a, 0x85, 0x2f, 0x5d, 0xea, 0xca, 0xe5, 0xc2, 0x39, 0x80, - 0xa1, 0x31, 0x27, 0x70, 0x1d, 0x00, 0xc7, 0xb5, 0xbd, 0xae, 0xdf, 0x21, 0x9c, 0xc8, 0x04, 0x16, - 0xe3, 0x29, 0xb8, 0x33, 0x44, 0x50, 0x42, 0x2b, 0x2d, 0xf3, 0xf9, 0xab, 0x65, 0xbe, 0x71, 0xf7, - 0xe4, 0xac, 0x92, 0x79, 0x71, 0x56, 0xc9, 0x9c, 0x9e, 0x55, 0x32, 0xcf, 0xc3, 0x8a, 0x76, 0x12, - 0x56, 0xb4, 0x17, 0x61, 0x45, 0x3b, 0x0d, 0x2b, 0xda, 0x3f, 0x61, 0x45, 0xfb, 0xe9, 0xdf, 0x4a, - 0xe6, 0x8b, 0x59, 0x75, 0xc3, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0x29, 0xa9, 0x9d, 0x7c, 0xb1, - 0x0f, 0x00, 0x00, + // 1154 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xf7, 0xfa, 0x25, 0xb1, 0xc7, 0xcd, 0x3f, 0xe9, 0x44, 0x69, 0xb6, 0xf9, 0x0b, 0xdb, 0x32, + 0x12, 0x0a, 0xa2, 0xdd, 0x25, 0xa1, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, + 0x44, 0xc9, 0x81, 0x4a, 0xc0, 0x78, 0x3d, 0xb1, 0x17, 0xdb, 0xbb, 0xcb, 0xcc, 0xac, 0x43, 0x10, + 0x87, 0x1e, 0x39, 0x72, 0xe4, 0xc8, 0x89, 0xef, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, + 0x64, 0x91, 0xe5, 0x43, 0x70, 0x45, 0x33, 0x3b, 0xf6, 0xae, 0xe3, 0x75, 0x1c, 0xe7, 0x40, 0x2f, + 0xbd, 0xed, 0x3c, 0xbf, 0xe7, 0x6d, 0x9e, 0x97, 0xd9, 0x1f, 0xd8, 0x6f, 0x3f, 0x64, 0x86, 0xed, + 0x9a, 0x6d, 0xbf, 0x4e, 0xa8, 0x43, 0x38, 0x61, 0x66, 0x8f, 0x38, 0x0d, 0x97, 0x9a, 0x0a, 0xc0, + 0x9e, 0x6d, 0x62, 0x9f, 0xb7, 0x5c, 0x6a, 0x7f, 0x8b, 0xb9, 0xed, 0x3a, 0x66, 0x6f, 0xa3, 0x4e, + 0x38, 0xde, 0x30, 0x9b, 0xc4, 0x21, 0x14, 0x73, 0xd2, 0x30, 0x3c, 0xea, 0x72, 0x17, 0x56, 0x42, + 0x0b, 0x03, 0x7b, 0xb6, 0x31, 0x62, 0x61, 0x28, 0x8b, 0xb5, 0xfb, 0x4d, 0x9b, 0xb7, 0xfc, 0xba, + 0x61, 0xb9, 0x5d, 0xb3, 0xe9, 0x36, 0x5d, 0x53, 0x1a, 0xd6, 0xfd, 0x63, 0x79, 0x92, 0x07, 0xf9, + 0x15, 0x3a, 
0x5c, 0x7b, 0x10, 0xa5, 0xd0, 0xc5, 0x56, 0xcb, 0x76, 0x08, 0x3d, 0x35, 0xbd, 0x76, + 0x53, 0x08, 0x98, 0xd9, 0x25, 0x1c, 0x9b, 0xbd, 0xb1, 0x34, 0xd6, 0xcc, 0x49, 0x56, 0xd4, 0x77, + 0xb8, 0xdd, 0x25, 0x63, 0x06, 0x1f, 0x4c, 0x33, 0x60, 0x56, 0x8b, 0x74, 0xf1, 0x98, 0xdd, 0x7b, + 0x93, 0xec, 0x7c, 0x6e, 0x77, 0x4c, 0xdb, 0xe1, 0x8c, 0xd3, 0xcb, 0x46, 0xd5, 0x2d, 0x00, 0xf6, + 0xbe, 0xe1, 0x14, 0x1f, 0xe1, 0x8e, 0x4f, 0x60, 0x19, 0xe4, 0x6c, 0x4e, 0xba, 0x4c, 0xd7, 0x2a, + 0x99, 0xf5, 0x42, 0xad, 0x10, 0xf4, 0xcb, 0xb9, 0x7d, 0x21, 0x40, 0xa1, 0x7c, 0x3b, 0xff, 0xe3, + 0x4f, 0xe5, 0xd4, 0x8b, 0x3f, 0x2b, 0xa9, 0xea, 0xaf, 0x69, 0xa0, 0x3f, 0x76, 0x2d, 0xdc, 0x39, + 0xf0, 0xeb, 0x5f, 0x11, 0x8b, 0xef, 0x58, 0x16, 0x61, 0x0c, 0x91, 0x9e, 0x4d, 0x4e, 0xe0, 0x97, + 0x20, 0x2f, 0xca, 0xd1, 0xc0, 0x1c, 0xeb, 0x5a, 0x45, 0x5b, 0x2f, 0x6e, 0xbe, 0x6b, 0x44, 0xdd, + 0x18, 0x66, 0x67, 0x78, 0xed, 0xa6, 0x10, 0x30, 0x43, 0x68, 0x1b, 0xbd, 0x0d, 0xe3, 0x53, 0xe9, + 0xeb, 0x09, 0xe1, 0xb8, 0x06, 0xcf, 0xfa, 0xe5, 0x54, 0xd0, 0x2f, 0x83, 0x48, 0x86, 0x86, 0x5e, + 0xe1, 0x73, 0x90, 0x65, 0x1e, 0xb1, 0xf4, 0xb4, 0xf4, 0xfe, 0xa1, 0x31, 0xad, 0xd7, 0x46, 0x42, + 0x9a, 0x07, 0x1e, 0xb1, 0x6a, 0xb7, 0x54, 0x98, 0xac, 0x38, 0x21, 0xe9, 0x14, 0x5a, 0x60, 0x8e, + 0x71, 0xcc, 0x7d, 0xa6, 0x67, 0xa4, 0xfb, 0x8f, 0x6e, 0xe6, 0x5e, 0xba, 0xa8, 0xfd, 0x4f, 0x05, + 0x98, 0x0b, 0xcf, 0x48, 0xb9, 0xae, 0x3e, 0x07, 0x2b, 0x4f, 0x5d, 0x07, 0x11, 0xe6, 0xfa, 0xd4, + 0x22, 0x3b, 0x9c, 0x53, 0xbb, 0xee, 0x73, 0xc2, 0x60, 0x05, 0x64, 0x3d, 0xcc, 0x5b, 0xb2, 0x70, + 0x85, 0x28, 0xbf, 0x67, 0x98, 0xb7, 0x90, 0x44, 0x84, 0x46, 0x8f, 0xd0, 0xba, 0xbc, 0x7c, 0x4c, + 0xe3, 0x88, 0xd0, 0x3a, 0x92, 0x48, 0xf5, 0x6b, 0xb0, 0x18, 0x73, 0x8e, 0xfc, 0x8e, 0xec, 0xad, + 0x80, 0x46, 0x7a, 0x2b, 0x2c, 0x18, 0x0a, 0xe5, 0xf0, 0x11, 0x58, 0x74, 0x22, 0x9b, 0x43, 0xf4, + 0x98, 0xe9, 0x69, 0xa9, 0xba, 0x1c, 0xf4, 0xcb, 0x71, 0x77, 0x02, 0x42, 0x97, 0x75, 0xc5, 0x40, + 0xc0, 0x84, 0xdb, 0x98, 0xa0, 0xe0, 0xe0, 0x2e, 0x61, 0x1e, 0xb6, 0x88, 0xba, 0xd2, 0x6d, 0x95, + 0x70, 0xe1, 0xe9, 0x00, 0x40, 0x91, 0xce, 0xf4, 0xcb, 0xc1, 0x37, 0x41, 0xae, 0x49, 0x5d, 0xdf, + 0x93, 0xdd, 0x29, 0xd4, 0x16, 0x94, 0x4a, 0xee, 0x13, 0x21, 0x44, 0x21, 0x06, 0xdf, 0x06, 0xf3, + 0x3d, 0x42, 0x99, 0xed, 0x3a, 0x7a, 0x56, 0xaa, 0x2d, 0x2a, 0xb5, 0xf9, 0xa3, 0x50, 0x8c, 0x06, + 0x38, 0xbc, 0x07, 0xf2, 0x54, 0x25, 0xae, 0xe7, 0xa4, 0xee, 0x92, 0xd2, 0xcd, 0x0f, 0x2b, 0x38, + 0xd4, 0x80, 0xef, 0x83, 0x22, 0xf3, 0xeb, 0x43, 0x83, 0x39, 0x69, 0xb0, 0xac, 0x0c, 0x8a, 0x07, + 0x11, 0x84, 0xe2, 0x7a, 0xe2, 0x5a, 0xe2, 0x8e, 0xfa, 0xfc, 0xe8, 0xb5, 0x44, 0x09, 0x90, 0x44, + 0xaa, 0xbf, 0x6b, 0xe0, 0xd6, 0x6c, 0x1d, 0x7b, 0x07, 0x14, 0xb0, 0x67, 0xcb, 0x6b, 0x0f, 0x7a, + 0xb5, 0x20, 0xea, 0xba, 0xf3, 0x6c, 0x3f, 0x14, 0xa2, 0x08, 0x17, 0xca, 0x83, 0x64, 0xc4, 0x5c, + 0x0f, 0x95, 0x07, 0x21, 0x19, 0x8a, 0x70, 0xb8, 0x05, 0x16, 0x06, 0x07, 0xd9, 0x24, 0x3d, 0x2b, + 0x0d, 0x6e, 0x07, 0xfd, 0xf2, 0x02, 0x8a, 0x03, 0x68, 0x54, 0xaf, 0xfa, 0x5b, 0x1a, 0xac, 0x1e, + 0x90, 0xce, 0xf1, 0xab, 0x79, 0x15, 0xbe, 0x18, 0x79, 0x15, 0x1e, 0x5d, 0x63, 0x6d, 0x93, 0x53, + 0x7d, 0xb5, 0x2f, 0xc3, 0xcf, 0x69, 0xf0, 0xff, 0x2b, 0x12, 0x83, 0xdf, 0x01, 0x48, 0xc7, 0x16, + 0x4d, 0x55, 0xf4, 0xc1, 0xf4, 0x84, 0xc6, 0x97, 0xb4, 0x76, 0x27, 0xe8, 0x97, 0x13, 0x96, 0x17, + 0x25, 0xc4, 0x81, 0xdf, 0x6b, 0x60, 0xc5, 0x49, 0x7a, 0xb8, 0x54, 0xd5, 0xb7, 0xa6, 0x67, 0x90, + 0xf8, 0xee, 0xd5, 0xee, 0x06, 0xfd, 0x72, 0xf2, 0x93, 0x88, 0x92, 0x03, 0x8a, 0x27, 0xe7, 0x4e, + 0xac, 0x50, 0x62, 0x69, 0xfe, 0xbb, 
0x59, 0xfb, 0x7c, 0x64, 0xd6, 0x3e, 0x9e, 0x69, 0xd6, 0x62, + 0x99, 0x4e, 0x1c, 0xb5, 0xfa, 0xa5, 0x51, 0xdb, 0xbe, 0xf6, 0xa8, 0xc5, 0xbd, 0x5f, 0x3d, 0x69, + 0x4f, 0xc0, 0xda, 0xe4, 0xac, 0x66, 0x7e, 0xba, 0xab, 0xbf, 0xa4, 0xc1, 0xf2, 0x6b, 0x3a, 0x70, + 0xb3, 0xa5, 0x3f, 0xcf, 0x82, 0xd5, 0xd7, 0x0b, 0x7f, 0xf5, 0xc2, 0x8b, 0x9f, 0xa8, 0xcf, 0x08, + 0x55, 0x3f, 0xfe, 0x61, 0xaf, 0x0e, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x32, 0xe0, 0x06, 0xe1, 0x0f, + 0x0b, 0x88, 0x4a, 0xab, 0x7f, 0xa1, 0x22, 0x06, 0x36, 0xc8, 0x11, 0xc1, 0x78, 0xf5, 0x5c, 0x25, + 0xb3, 0x5e, 0xdc, 0xdc, 0xbd, 0xf1, 0xac, 0x18, 0x92, 0x38, 0xef, 0x39, 0x9c, 0x9e, 0x46, 0x1c, + 0x44, 0xca, 0x50, 0x18, 0x01, 0xbe, 0x01, 0x32, 0xbe, 0xdd, 0x50, 0x14, 0xa1, 0xa8, 0x54, 0x32, + 0x87, 0xfb, 0xbb, 0x48, 0xc8, 0xd7, 0x8e, 0x15, 0xf7, 0x96, 0x2e, 0xe0, 0x12, 0xc8, 0xb4, 0xc9, + 0x69, 0xb8, 0x67, 0x48, 0x7c, 0xc2, 0x1a, 0xc8, 0xf5, 0x04, 0x2d, 0x57, 0x75, 0xbe, 0x37, 0x3d, + 0xd3, 0x88, 0xca, 0xa3, 0xd0, 0x74, 0x3b, 0xfd, 0x50, 0xab, 0xfe, 0xa1, 0x81, 0xbb, 0x13, 0x07, + 0x52, 0x10, 0x25, 0xdc, 0xe9, 0xb8, 0x27, 0xa4, 0x21, 0x63, 0xe7, 0x23, 0xa2, 0xb4, 0x13, 0x8a, + 0xd1, 0x00, 0x87, 0x6f, 0x81, 0x39, 0x4a, 0x30, 0x73, 0x1d, 0x45, 0xce, 0x86, 0xb3, 0x8c, 0xa4, + 0x14, 0x29, 0x14, 0xee, 0x80, 0x45, 0x22, 0xc2, 0xcb, 0xe4, 0xf6, 0x28, 0x75, 0x07, 0x1d, 0x5b, + 0x55, 0x06, 0x8b, 0x7b, 0xa3, 0x30, 0xba, 0xac, 0x2f, 0x42, 0x35, 0x88, 0x63, 0x93, 0x86, 0x64, + 0x6f, 0xf9, 0x28, 0xd4, 0xae, 0x94, 0x22, 0x85, 0x56, 0xff, 0x49, 0x03, 0x7d, 0xd2, 0xb3, 0x07, + 0xdb, 0x11, 0x8b, 0x91, 0xa0, 0x24, 0x52, 0xc5, 0x4d, 0xe3, 0xfa, 0x2b, 0x23, 0xcc, 0x6a, 0x2b, + 0x2a, 0xf6, 0x42, 0x5c, 0x1a, 0x63, 0x3e, 0xf2, 0x08, 0x4f, 0xc0, 0x92, 0x33, 0x4a, 0xb9, 0x43, + 0x4e, 0x56, 0xdc, 0xdc, 0x98, 0x69, 0x41, 0x64, 0x48, 0x5d, 0x85, 0x5c, 0xba, 0x04, 0x30, 0x34, + 0x16, 0x04, 0x6e, 0x02, 0x60, 0x3b, 0x96, 0xdb, 0xf5, 0x3a, 0x84, 0x13, 0x59, 0xe8, 0x7c, 0xf4, + 0x5a, 0xee, 0x0f, 0x11, 0x14, 0xd3, 0x4a, 0xea, 0x50, 0x76, 0xb6, 0x0e, 0xd5, 0xee, 0x9f, 0x5d, + 0x94, 0x52, 0x2f, 0x2f, 0x4a, 0xa9, 0xf3, 0x8b, 0x52, 0xea, 0x45, 0x50, 0xd2, 0xce, 0x82, 0x92, + 0xf6, 0x32, 0x28, 0x69, 0xe7, 0x41, 0x49, 0xfb, 0x2b, 0x28, 0x69, 0x3f, 0xfc, 0x5d, 0x4a, 0x7d, + 0x36, 0xaf, 0x6e, 0xf8, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcc, 0xb3, 0x5e, 0x05, 0xd9, 0x0f, + 0x00, 0x00, } diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto index 1394725d..59df0b6d 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -226,9 +226,16 @@ message SubjectAccessReviewSpec { // SubjectAccessReviewStatus message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. optional bool allowed = 1; + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + // Reason is optional. It indicates why a request was allowed or denied. 
// +optional optional string reason = 2; @@ -263,3 +270,4 @@ message SubjectRulesReviewStatus { // +optional optional string evaluationError = 4; } + diff --git a/vendor/k8s.io/api/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go index d8116d5a..84255dd6 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/register.go +++ b/vendor/k8s.io/api/authorization/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &SelfSubjectRulesReview{}, diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index f62f5956..618ff8c0 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -169,8 +169,14 @@ type SelfSubjectAccessReviewSpec struct { // SubjectAccessReviewStatus type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. + // Allowed is required. True if the action would be allowed, false otherwise. Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` // Reason is optional. It indicates why a request was allowed or denied. // +optional Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go index 544c2fa4..2371b21c 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -148,7 +148,8 @@ func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { var map_SubjectAccessReviewStatus = map[string]string{ "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", "reason": "Reason is optional. It indicates why a request was allowed or denied.", "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. 
For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", } diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go index aeb77ddb..fcb0067a 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -21,76 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalSubjectAccessReview).DeepCopyInto(out.(*LocalSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&LocalSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceAttributes).DeepCopyInto(out.(*NonResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&NonResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NonResourceRule).DeepCopyInto(out.(*NonResourceRule)) - return nil - }, InType: reflect.TypeOf(&NonResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceAttributes).DeepCopyInto(out.(*ResourceAttributes)) - return nil - }, InType: reflect.TypeOf(&ResourceAttributes{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRule).DeepCopyInto(out.(*ResourceRule)) - return nil - }, InType: reflect.TypeOf(&ResourceRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReview).DeepCopyInto(out.(*SelfSubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectAccessReviewSpec).DeepCopyInto(out.(*SelfSubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReview).DeepCopyInto(out.(*SelfSubjectRulesReview)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SelfSubjectRulesReviewSpec).DeepCopyInto(out.(*SelfSubjectRulesReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SelfSubjectRulesReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReview).DeepCopyInto(out.(*SubjectAccessReview)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReview{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewSpec).DeepCopyInto(out.(*SubjectAccessReviewSpec)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectAccessReviewStatus).DeepCopyInto(out.(*SubjectAccessReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectAccessReviewStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SubjectRulesReviewStatus).DeepCopyInto(out.(*SubjectRulesReviewStatus)) - return nil - }, InType: reflect.TypeOf(&SubjectRulesReviewStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { *out = *in diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go index 1689adb5..9c3be845 100644 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index 33b96f46..e41e6274 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -318,3 +318,4 @@ message ScaleStatus { // +optional optional string selector = 2; } + diff --git a/vendor/k8s.io/api/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go index 521c0e4a..8dfe361e 100644 --- a/vendor/k8s.io/api/autoscaling/v1/register.go +++ b/vendor/k8s.io/api/autoscaling/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index 20848f20..de4e6daa 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -23,92 +23,9 @@ package v1 import ( resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: 
reflect.TypeOf(&ResourceMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 689f369b..da9789e5 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index 6d970cdc..de3d2665 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -298,3 +298,4 @@ message ResourceMetricStatus { // It will always be set, regardless of the corresponding metric specification. optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; } + diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go index 9025b421..12d697f0 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/register.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &HorizontalPodAutoscaler{}, diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index b538e5b4..0bb3dd30 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -23,80 +23,9 @@ package v2beta1 import ( resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CrossVersionObjectReference).DeepCopyInto(out.(*CrossVersionObjectReference)) - return nil - }, InType: reflect.TypeOf(&CrossVersionObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscaler).DeepCopyInto(out.(*HorizontalPodAutoscaler)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscaler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerCondition).DeepCopyInto(out.(*HorizontalPodAutoscalerCondition)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerList).DeepCopyInto(out.(*HorizontalPodAutoscalerList)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerSpec).DeepCopyInto(out.(*HorizontalPodAutoscalerSpec)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HorizontalPodAutoscalerStatus).DeepCopyInto(out.(*HorizontalPodAutoscalerStatus)) - return nil - }, InType: reflect.TypeOf(&HorizontalPodAutoscalerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricSpec).DeepCopyInto(out.(*MetricSpec)) - return nil - }, InType: reflect.TypeOf(&MetricSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MetricStatus).DeepCopyInto(out.(*MetricStatus)) - return nil - }, InType: reflect.TypeOf(&MetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricSource).DeepCopyInto(out.(*ObjectMetricSource)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMetricStatus).DeepCopyInto(out.(*ObjectMetricStatus)) - return nil - }, InType: reflect.TypeOf(&ObjectMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricSource).DeepCopyInto(out.(*PodsMetricSource)) - return nil - }, InType: reflect.TypeOf(&PodsMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodsMetricStatus).DeepCopyInto(out.(*PodsMetricStatus)) - return nil - }, InType: reflect.TypeOf(&PodsMetricStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricSource).DeepCopyInto(out.(*ResourceMetricSource)) - return nil - }, InType: reflect.TypeOf(&ResourceMetricSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceMetricStatus).DeepCopyInto(out.(*ResourceMetricStatus)) - return nil - }, InType: 
reflect.TypeOf(&ResourceMetricStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index 52a1d903..04491807 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 43d8acc7..0f43d2fd 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -170,3 +170,4 @@ message JobStatus { // +optional optional int32 failed = 6; } + diff --git a/vendor/k8s.io/api/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go index 2cdfc98c..32fa51f0 100644 --- a/vendor/k8s.io/api/batch/v1/register.go +++ b/vendor/k8s.io/api/batch/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Job{}, diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go index 8738bd5d..fa9ff4f9 100644 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -22,44 +22,9 @@ package v1 import ( meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Job).DeepCopyInto(out.(*Job)) - return nil - }, InType: reflect.TypeOf(&Job{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobCondition).DeepCopyInto(out.(*JobCondition)) - return nil - }, InType: reflect.TypeOf(&JobCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobList).DeepCopyInto(out.(*JobList)) - return nil - }, InType: reflect.TypeOf(&JobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobSpec).DeepCopyInto(out.(*JobSpec)) - return nil - }, InType: reflect.TypeOf(&JobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobStatus).DeepCopyInto(out.(*JobStatus)) - return nil - }, InType: reflect.TypeOf(&JobStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Job) DeepCopyInto(out *Job) { *out = *in diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go index 653ebe04..43020ed0 100644 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index d98d5978..11fa4e2a 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -71,7 +71,10 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; @@ -132,3 +135,4 @@ message JobTemplateSpec { // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } + diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go index 569817d3..226de49f 100644 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index ad13bba2..cb5c9bad 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -100,7 +100,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index f4c74e43..3b53ac08 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. 
Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index c730ca98..0f8562f8 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -23,48 +23,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index 288275d6..f4ed01ad 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 14ad77b4..cc90d419 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -71,7 +71,10 @@ message CronJobSpec { optional int64 startingDeadlineSeconds = 2; // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional optional string concurrencyPolicy = 3; @@ -130,3 +133,4 @@ message JobTemplateSpec { // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } + diff --git a/vendor/k8s.io/api/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go index 4b02c0f4..ac7fa508 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/register.go +++ b/vendor/k8s.io/api/batch/v2alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &JobTemplate{}, diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index 451df40e..cccff94f 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -100,7 +100,10 @@ type CronJobSpec struct { StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` // Specifies how to treat concurrent executions of a Job. - // Defaults to Allow. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one // +optional ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index 2b3ed4c5..d166b807 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -52,7 +52,7 @@ var map_CronJobSpec = map[string]string{ "": "CronJobSpec describes how the job execution will look like and when it will actually run.", "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. 
Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Defaults to Allow.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", "jobTemplate": "Specifies the job that will be created when executing a CronJob.", "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index b572f9ca..5474235d 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -23,48 +23,9 @@ package v2alpha1 import ( v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJob).DeepCopyInto(out.(*CronJob)) - return nil - }, InType: reflect.TypeOf(&CronJob{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobList).DeepCopyInto(out.(*CronJobList)) - return nil - }, InType: reflect.TypeOf(&CronJobList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobSpec).DeepCopyInto(out.(*CronJobSpec)) - return nil - }, InType: reflect.TypeOf(&CronJobSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CronJobStatus).DeepCopyInto(out.(*CronJobStatus)) - return nil - }, InType: reflect.TypeOf(&CronJobStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplate).DeepCopyInto(out.(*JobTemplate)) - return nil - }, InType: reflect.TypeOf(&JobTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*JobTemplateSpec).DeepCopyInto(out.(*JobTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&JobTemplateSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *CronJob) DeepCopyInto(out *CronJob) { *out = *in diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index d98ee7c3..fb23aadb 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 88f51be1..e90f4f9c 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -119,3 +119,4 @@ message ExtraValue { repeated string items = 1; } + diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go index b5ba3d7e..b4f3af9b 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/register.go +++ b/vendor/k8s.io/api/certificates/v1beta1/register.go @@ -46,7 +46,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &CertificateSigningRequest{}, diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 6ecc1311..de0715db 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -21,44 +21,9 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequest).DeepCopyInto(out.(*CertificateSigningRequest)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequest{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestCondition).DeepCopyInto(out.(*CertificateSigningRequestCondition)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestList).DeepCopyInto(out.(*CertificateSigningRequestList)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestSpec).DeepCopyInto(out.(*CertificateSigningRequestSpec)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CertificateSigningRequestStatus).DeepCopyInto(out.(*CertificateSigningRequestStatus)) - return nil - }, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { *out = *in diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index e623913f..de4e3cee 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -45,12 +45,6 @@ const ( // to one container of a pod. SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - // CreatedByAnnotation represents the key used to store the spec(json) - // used to create the resource. - // This field is deprecated in favor of ControllerRef (see #44407). - // TODO(#50720): Remove this field in v1.9. - CreatedByAnnotation = "kubernetes.io/created-by" - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) // in the Annotations of a Node. PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index a31af9ea..96994c62 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -15,7 +15,7 @@ limitations under the License. */ // +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 1cd0bdc7..c2fe6bc5 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -33,6 +33,7 @@ limitations under the License. AzureFilePersistentVolumeSource AzureFileVolumeSource Binding + CSIPersistentVolumeSource Capabilities CephFSPersistentVolumeSource CephFSVolumeSource @@ -71,6 +72,7 @@ limitations under the License. 
EnvVarSource Event EventList + EventSeries EventSource ExecAction FCVolumeSource @@ -84,6 +86,7 @@ limitations under the License. Handler HostAlias HostPathVolumeSource + ISCSIPersistentVolumeSource ISCSIVolumeSource KeyToPath Lifecycle @@ -138,6 +141,8 @@ limitations under the License. PodAntiAffinity PodAttachOptions PodCondition + PodDNSConfig + PodDNSConfigOption PodExecOptions PodList PodLogOptions @@ -200,6 +205,7 @@ limitations under the License. Taint Toleration Volume + VolumeDevice VolumeMount VolumeProjection VolumeSource @@ -272,738 +278,766 @@ func (m *Binding) Reset() { *m = Binding{} } func (*Binding) ProtoMessage() {} func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} +} + func (m *Capabilities) Reset() { *m = Capabilities{} } func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } func (*CephFSPersistentVolumeSource) ProtoMessage() {} func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} + return fileDescriptorGenerated, []int{10} } func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *ConfigMap) Reset() { *m = ConfigMap{} } func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *Container) Reset() { *m = Container{} } func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *ContainerImage) Reset() { *m = ContainerImage{} } func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *ContainerPort) Reset() { *m = ContainerPort{} } func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *ContainerState) Reset() { *m = ContainerState{} } func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } func (*ContainerStateTerminated) ProtoMessage() {} func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptorGenerated, []int{28} } func (m 
*ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } func (*DownwardAPIVolumeSource) ProtoMessage() {} func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{35} } func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *EndpointPort) Reset() { *m = EndpointPort{} } func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *Endpoints) Reset() { *m = Endpoints{} } func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{39} } +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *EndpointsList) Reset() { *m = EndpointsList{} } func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *EnvVar) Reset() { *m = EnvVar{} } func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *Event) Reset() { *m = Event{} } func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *EventList) Reset() { *m = EventList{} } func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *EventSource) Reset() { *m = EventSource{} } func (*EventSource) ProtoMessage() {} -func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } func (m *ExecAction) Reset() { *m = ExecAction{} } func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } func 
(*GCEPersistentDiskVolumeSource) ProtoMessage() {} func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{53} } func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } func (m *Handler) Reset() { *m = Handler{} } func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } func (m *HostAlias) Reset() { *m = HostAlias{} } func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{61} +} func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } func (m *KeyToPath) Reset() { *m = KeyToPath{} } func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } func (m *Lifecycle) Reset() { *m = Lifecycle{} } func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } func (m *LimitRange) Reset() { *m = LimitRange{} } func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{62} } +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{63} } +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } func (m *List) Reset() { *m = List{} } func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } func (m *ListOptions) Reset() { *m = ListOptions{} } func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } +func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } func (m *Namespace) Reset() { *m = Namespace{} } func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } func (m *NamespaceList) Reset() { *m = NamespaceList{} } func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } func (m *Node) Reset() { *m = Node{} } func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } func (m *NodeAddress) Reset() { *m = NodeAddress{} } func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } func (m *NodeCondition) Reset() { *m = NodeCondition{} } func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } func (m *NodeList) Reset() { *m = NodeList{} } func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } func (m *NodeResources) Reset() { *m = NodeResources{} } func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } func (m *NodeSelector) Reset() { *m = NodeSelector{} } func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{86} } +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } func (*NodeSelectorRequirement) ProtoMessage() {} func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{87} + return fileDescriptorGenerated, []int{90} } func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } func (m *NodeSpec) Reset() { *m = NodeSpec{} } func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } func (m *NodeStatus) Reset() { *m = NodeStatus{} } func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{92} } +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } func (m *ObjectReference) Reset() { *m = ObjectReference{} } func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } func (*PersistentVolumeClaimCondition) ProtoMessage() {} func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{97} + return fileDescriptorGenerated, []int{100} } func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } func (*PersistentVolumeClaimList) ProtoMessage() {} 
func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{98} + return fileDescriptorGenerated, []int{101} } func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } func (*PersistentVolumeClaimSpec) ProtoMessage() {} func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{99} + return fileDescriptorGenerated, []int{102} } func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } func (*PersistentVolumeClaimStatus) ProtoMessage() {} func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{100} + return fileDescriptorGenerated, []int{103} } func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{101} + return fileDescriptorGenerated, []int{104} } func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{102} } +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{105} } func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } func (*PersistentVolumeSource) ProtoMessage() {} func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{103} + return fileDescriptorGenerated, []int{106} } func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } func (*PersistentVolumeSpec) ProtoMessage() {} -func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{104} } +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } func (*PersistentVolumeStatus) ProtoMessage() {} func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{105} + return fileDescriptorGenerated, []int{108} } func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{106} + return fileDescriptorGenerated, []int{109} } func (m *Pod) Reset() { *m = Pod{} } func (*Pod) ProtoMessage() {} -func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{107} } +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } func (m *PodAffinity) Reset() { *m = PodAffinity{} } func (*PodAffinity) ProtoMessage() {} -func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } func (*PodAffinityTerm) ProtoMessage() {} -func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{109} } +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } func (*PodAntiAffinity) ProtoMessage() {} -func 
(*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{110} } +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } func (*PodAttachOptions) ProtoMessage() {} -func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } func (m *PodCondition) Reset() { *m = PodCondition{} } func (*PodCondition) ProtoMessage() {} -func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } func (*PodExecOptions) ProtoMessage() {} -func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } func (m *PodList) Reset() { *m = PodList{} } func (*PodList) ProtoMessage() {} -func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } func (*PodLogOptions) ProtoMessage() {} -func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{115} } +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } func (*PodPortForwardOptions) ProtoMessage() {} -func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } func (*PodProxyOptions) ProtoMessage() {} -func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } func (*PodSecurityContext) ProtoMessage() {} -func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } func (m *PodSignature) Reset() { *m = PodSignature{} } func (*PodSignature) ProtoMessage() {} -func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } func (m *PodSpec) Reset() { *m = PodSpec{} } func (*PodSpec) ProtoMessage() {} -func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } +func (*PodSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{125} } func (m *PodStatus) Reset() { *m = PodStatus{} } func (*PodStatus) ProtoMessage() {} -func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } func (*PodStatusResult) ProtoMessage() {} -func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } func (m *PodTemplate) Reset() { *m = PodTemplate{} } func (*PodTemplate) ProtoMessage() {} -func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } func (*PodTemplateList) ProtoMessage() {} -func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } +func (*PodTemplateList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } func (*PodTemplateSpec) ProtoMessage() {} -func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } func (*PortworxVolumeSource) ProtoMessage() {} -func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } func (*PreferAvoidPodsEntry) ProtoMessage() {} -func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } func (*PreferredSchedulingTerm) ProtoMessage() {} func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{129} + return fileDescriptorGenerated, []int{134} } func (m *Probe) Reset() { *m = Probe{} } func (*Probe) ProtoMessage() {} -func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } +func (*Probe) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } func (*ProjectedVolumeSource) ProtoMessage() {} -func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{131} } +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } func (*QuobyteVolumeSource) ProtoMessage() {} -func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{132} } +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{137} } func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } func (*RBDPersistentVolumeSource) ProtoMessage() {} func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{133} + return fileDescriptorGenerated, []int{138} } func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } func (*RBDVolumeSource) ProtoMessage() {} -func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } func (*RangeAllocation) ProtoMessage() {} -func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{140} } func (m *ReplicationController) Reset() { *m = ReplicationController{} } func (*ReplicationController) ProtoMessage() {} -func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{136} } +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } func (*ReplicationControllerCondition) ProtoMessage() {} func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{137} + return fileDescriptorGenerated, []int{142} } func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } func (*ReplicationControllerList) ProtoMessage() {} func (*ReplicationControllerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{138} + return fileDescriptorGenerated, []int{143} } func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } func (*ReplicationControllerSpec) ProtoMessage() {} func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{139} + return fileDescriptorGenerated, []int{144} } func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } func (*ReplicationControllerStatus) ProtoMessage() {} func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{140} + return fileDescriptorGenerated, []int{145} } func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } func (*ResourceFieldSelector) ProtoMessage() {} -func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } func (*ResourceQuota) ProtoMessage() {} -func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } func (*ResourceQuotaList) ProtoMessage() {} -func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } 
func (*ResourceQuotaSpec) ProtoMessage() {} -func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{144} } +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } func (*ResourceQuotaStatus) ProtoMessage() {} -func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{145} } +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } func (*ResourceRequirements) ProtoMessage() {} -func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{146} } +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } func (*SELinuxOptions) ProtoMessage() {} -func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{147} } +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{148} + return fileDescriptorGenerated, []int{153} } func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } func (*ScaleIOVolumeSource) ProtoMessage() {} -func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } func (m *Secret) Reset() { *m = Secret{} } func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } func (m *SecretList) Reset() { *m = SecretList{} } func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } func (m *SecretProjection) Reset() { *m = SecretProjection{} } func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } func (m *SecretReference) Reset() { *m = SecretReference{} } func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{155} } +func (*SecretReference) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{160} } func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } func (m *SecurityContext) Reset() { *m = SecurityContext{} } func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } func (m *SerializedReference) Reset() { *m = SerializedReference{} } func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{158} } +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } func (m *Service) Reset() { *m = Service{} } func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } func (m *ServiceList) Reset() { *m = ServiceList{} } func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } func (m *ServicePort) Reset() { *m = ServicePort{} } func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{167} } +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } func (*StorageOSPersistentVolumeSource) ProtoMessage() {} func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{168} + return fileDescriptorGenerated, []int{173} } func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } func (m *Sysctl) Reset() { *m = Sysctl{} } func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{171} } +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } func (m *Taint) Reset() { *m = Taint{} } func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } func (m *Toleration) Reset() { *m = Toleration{} } func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{178} } func (m *Volume) Reset() { *m = Volume{} } func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } func (m *VolumeMount) Reset() { *m = VolumeMount{} } func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } func (m *VolumeSource) Reset() { *m = VolumeSource{} } func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{178} + return fileDescriptorGenerated, []int{184} } 
func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } func (*WeightedPodAffinityTerm) ProtoMessage() {} func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{179} + return fileDescriptorGenerated, []int{185} } func init() { @@ -1015,6 +1049,7 @@ func init() { proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") @@ -1053,6 +1088,7 @@ func init() { proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") @@ -1066,6 +1102,7 @@ func init() { proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") @@ -1120,6 +1157,8 @@ func init() { proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") @@ -1182,6 +1221,7 @@ func init() { proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") @@ -1491,6 +1531,40 @@ func (m *Binding) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i += copy(dAtA[i:], m.VolumeHandle) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} + func (m *Capabilities) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2294,6 +2368,20 @@ func (m *Container) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) i += copy(dAtA[i:], m.TerminationMessagePolicy) + if len(m.VolumeDevices) > 0 { + for _, msg := range m.VolumeDevices { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -3203,6 +3291,46 @@ func (m *Event) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n48, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + if m.Series != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n49, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + } + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + if m.Related != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n50, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + } + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) return i, nil } @@ -3224,11 +3352,11 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n48, err := m.ListMeta.MarshalTo(dAtA[i:]) + n51, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n48 + i += n51 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -3244,6 +3372,39 @@ func (m *EventList) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n52, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + func (m *EventSource) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3395,11 +3556,11 @@ func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n49, err := m.SecretRef.MarshalTo(dAtA[i:]) + n53, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n49 + i += n53 } dAtA[i] = 0x20 i++ @@ -3583,11 +3744,11 @@ func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n50, err := m.Port.MarshalTo(dAtA[i:]) + n54, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n50 + i += n54 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -3656,31 +3817,31 @@ func (m *Handler) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n51, err := m.Exec.MarshalTo(dAtA[i:]) + n55, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n55 } if m.HTTPGet != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n52, err := m.HTTPGet.MarshalTo(dAtA[i:]) + n56, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n56 } if m.TCPSocket != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n53, err := m.TCPSocket.MarshalTo(dAtA[i:]) + n57, err := m.TCPSocket.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n57 } return i, nil } @@ -3750,6 +3911,98 @@ func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } + return i, nil +} + func (m *ISCSIVolumeSource) Marshal() (dAtA 
[]byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3819,11 +4072,11 @@ func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n54, err := m.SecretRef.MarshalTo(dAtA[i:]) + n59, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n59 } dAtA[i] = 0x58 i++ @@ -3892,21 +4145,21 @@ func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n55, err := m.PostStart.MarshalTo(dAtA[i:]) + n60, err := m.PostStart.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n60 } if m.PreStop != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n56, err := m.PreStop.MarshalTo(dAtA[i:]) + n61, err := m.PreStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n61 } return i, nil } @@ -3929,19 +4182,19 @@ func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n57, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n62 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n58, err := m.Spec.MarshalTo(dAtA[i:]) + n63, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n63 return i, nil } @@ -3988,11 +4241,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n59, err := (&v).MarshalTo(dAtA[i:]) + n64, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n59 + i += n64 } } if len(m.Min) > 0 { @@ -4019,11 +4272,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n60, err := (&v).MarshalTo(dAtA[i:]) + n65, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n60 + i += n65 } } if len(m.Default) > 0 { @@ -4050,11 +4303,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n61, err := (&v).MarshalTo(dAtA[i:]) + n66, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n61 + i += n66 } } if len(m.DefaultRequest) > 0 { @@ -4081,11 +4334,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n62, err := (&v).MarshalTo(dAtA[i:]) + n67, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n62 + i += n67 } } if len(m.MaxLimitRequestRatio) > 0 { @@ -4112,11 +4365,11 @@ func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n63, err := (&v).MarshalTo(dAtA[i:]) + n68, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n63 + i += n68 } } return i, nil @@ -4140,11 +4393,11 @@ func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n64, err := m.ListMeta.MarshalTo(dAtA[i:]) + n69, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n64 + i += n69 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4208,11 +4461,11 @@ func (m *List) 
MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n65, err := m.ListMeta.MarshalTo(dAtA[i:]) + n70, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n65 + i += n70 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4431,27 +4684,27 @@ func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n66, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n71, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n66 + i += n71 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n67, err := m.Spec.MarshalTo(dAtA[i:]) + n72, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n67 + i += n72 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n68, err := m.Status.MarshalTo(dAtA[i:]) + n73, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n68 + i += n73 return i, nil } @@ -4473,11 +4726,11 @@ func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n69, err := m.ListMeta.MarshalTo(dAtA[i:]) + n74, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n69 + i += n74 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4566,27 +4819,27 @@ func (m *Node) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n70, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n75, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n70 + i += n75 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n71, err := m.Spec.MarshalTo(dAtA[i:]) + n76, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n71 + i += n76 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n72, err := m.Status.MarshalTo(dAtA[i:]) + n77, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n72 + i += n77 return i, nil } @@ -4635,11 +4888,11 @@ func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n73, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) + n78, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n73 + i += n78 } if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { @@ -4682,19 +4935,19 @@ func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n74, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + n79, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n74 + i += n79 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n75, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n80, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n75 + i += n80 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -4725,11 +4978,11 @@ func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa 
i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n76, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + n81, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n76 + i += n81 } return i, nil } @@ -4752,11 +5005,11 @@ func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) - n77, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + n82, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n77 + i += n82 return i, nil } @@ -4778,11 +5031,11 @@ func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n78, err := m.ListMeta.MarshalTo(dAtA[i:]) + n83, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n78 + i += n83 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -4859,11 +5112,11 @@ func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n79, err := (&v).MarshalTo(dAtA[i:]) + n84, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n79 + i += n84 } } return i, nil @@ -5021,11 +5274,11 @@ func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) - n80, err := m.ConfigSource.MarshalTo(dAtA[i:]) + n85, err := m.ConfigSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n80 + i += n85 } return i, nil } @@ -5069,11 +5322,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n81, err := (&v).MarshalTo(dAtA[i:]) + n86, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n81 + i += n86 } } if len(m.Allocatable) > 0 { @@ -5100,11 +5353,11 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n82, err := (&v).MarshalTo(dAtA[i:]) + n87, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n82 + i += n87 } } dAtA[i] = 0x1a @@ -5138,19 +5391,19 @@ func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) - n83, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + n88, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n83 + i += n88 dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) - n84, err := m.NodeInfo.MarshalTo(dAtA[i:]) + n89, err := m.NodeInfo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n84 + i += n89 if len(m.Images) > 0 { for _, msg := range m.Images { dAtA[i] = 0x42 @@ -5322,20 +5575,20 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) - n85, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + n90, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n85 + i += n90 if m.DeletionTimestamp != nil { dAtA[i] = 0x4a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) - n86, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + n91, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n86 + i += n91 } if m.DeletionGracePeriodSeconds != nil { dAtA[i] = 0x50 @@ -5423,11 
+5676,11 @@ func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) - n87, err := m.Initializers.MarshalTo(dAtA[i:]) + n92, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n87 + i += n92 } return i, nil } @@ -5496,27 +5749,27 @@ func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n88, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n93, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n88 + i += n93 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n89, err := m.Spec.MarshalTo(dAtA[i:]) + n94, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n89 + i += n94 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n90, err := m.Status.MarshalTo(dAtA[i:]) + n95, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n90 + i += n95 return i, nil } @@ -5538,27 +5791,27 @@ func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n91, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n96, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n91 + i += n96 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n92, err := m.Spec.MarshalTo(dAtA[i:]) + n97, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n92 + i += n97 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n93, err := m.Status.MarshalTo(dAtA[i:]) + n98, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n93 + i += n98 return i, nil } @@ -5588,19 +5841,19 @@ func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n94, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n99, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n94 + i += n99 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n95, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n100, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n95 + i += n100 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -5630,11 +5883,11 @@ func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n96, err := m.ListMeta.MarshalTo(dAtA[i:]) + n101, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n96 + i += n101 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5683,11 +5936,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) - n97, err := m.Resources.MarshalTo(dAtA[i:]) + n102, err := m.Resources.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n97 + i += n102 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) @@ -5696,11 +5949,11 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n98, err := 
m.Selector.MarshalTo(dAtA[i:]) + n103, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n98 + i += n103 } if m.StorageClassName != nil { dAtA[i] = 0x2a @@ -5708,6 +5961,12 @@ func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) i += copy(dAtA[i:], *m.StorageClassName) } + if m.VolumeMode != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } return i, nil } @@ -5769,11 +6028,11 @@ func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n99, err := (&v).MarshalTo(dAtA[i:]) + n104, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n99 + i += n104 } } if len(m.Conditions) > 0 { @@ -5839,11 +6098,11 @@ func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n100, err := m.ListMeta.MarshalTo(dAtA[i:]) + n105, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n100 + i += n105 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -5878,163 +6137,163 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n101, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n101 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n102, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n102 - } - if m.HostPath != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n103, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n103 - } - if m.Glusterfs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n104, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n104 - } - if m.NFS != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n105, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n105 - } - if m.RBD != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n106, err := m.RBD.MarshalTo(dAtA[i:]) + n106, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n106 } - if m.ISCSI != nil { - dAtA[i] = 0x3a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n107, err := m.ISCSI.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n107, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n107 } - if m.Cinder != nil { - dAtA[i] = 0x42 + if m.HostPath != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n108, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n108, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n108 } - if m.CephFS != nil { - dAtA[i] = 0x4a + if m.Glusterfs != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.CephFS.Size())) - n109, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n109, err := m.Glusterfs.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n109 } - if m.FC != nil { - dAtA[i] = 0x52 + if m.NFS != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n110, err := m.FC.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n110, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n110 } - if m.Flocker != nil { - dAtA[i] = 0x5a + if m.RBD != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n111, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n111, err := m.RBD.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n111 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.ISCSI != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n112, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n112, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n112 } - if m.AzureFile != nil { - dAtA[i] = 0x6a + if m.Cinder != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n113, err := m.AzureFile.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n113, err := m.Cinder.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n113 } - if m.VsphereVolume != nil { - dAtA[i] = 0x72 + if m.CephFS != nil { + dAtA[i] = 0x4a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n114, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n114, err := m.CephFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n114 } - if m.Quobyte != nil { - dAtA[i] = 0x7a + if m.FC != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n115, err := m.Quobyte.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n115, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n115 } + if m.Flocker != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n116, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n117, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n117 + } + if m.AzureFile != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n118, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n118 + } + if m.VsphereVolume != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n119, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n119 + } + if m.Quobyte != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n120, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + } if m.AzureDisk != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n116, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n121, err := 
m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n116 + i += n121 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0x8a @@ -6042,11 +6301,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n117, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n122, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n117 + i += n122 } if m.PortworxVolume != nil { dAtA[i] = 0x92 @@ -6054,11 +6313,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n118, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n123, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n118 + i += n123 } if m.ScaleIO != nil { dAtA[i] = 0x9a @@ -6066,11 +6325,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n119, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n124, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n119 + i += n124 } if m.Local != nil { dAtA[i] = 0xa2 @@ -6078,11 +6337,11 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) - n120, err := m.Local.MarshalTo(dAtA[i:]) + n125, err := m.Local.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n120 + i += n125 } if m.StorageOS != nil { dAtA[i] = 0xaa @@ -6090,11 +6349,23 @@ func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n121, err := m.StorageOS.MarshalTo(dAtA[i:]) + n126, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n121 + i += n126 + } + if m.CSI != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) + n127, err := m.CSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n127 } return i, nil } @@ -6138,21 +6409,21 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n122, err := (&v).MarshalTo(dAtA[i:]) + n128, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n122 + i += n128 } } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) - n123, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + n129, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n123 + i += n129 if len(m.AccessModes) > 0 { for _, s := range m.AccessModes { dAtA[i] = 0x1a @@ -6172,11 +6443,11 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) - n124, err := m.ClaimRef.MarshalTo(dAtA[i:]) + n130, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n124 + i += n130 } dAtA[i] = 0x2a i++ @@ -6201,6 +6472,12 @@ func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], s) } } + if m.VolumeMode != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } return i, nil } @@ -6278,27 +6555,27 @@ func (m *Pod) MarshalTo(dAtA []byte) 
(int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n125, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n131, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n125 + i += n131 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n126, err := m.Spec.MarshalTo(dAtA[i:]) + n132, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n126 + i += n132 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n127, err := m.Status.MarshalTo(dAtA[i:]) + n133, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n127 + i += n133 return i, nil } @@ -6363,11 +6640,11 @@ func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) - n128, err := m.LabelSelector.MarshalTo(dAtA[i:]) + n134, err := m.LabelSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n128 + i += n134 } if len(m.Namespaces) > 0 { for _, s := range m.Namespaces { @@ -6513,19 +6790,19 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n129, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + n135, err := m.LastProbeTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n129 + i += n135 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n130, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n136, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n130 + i += n136 dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -6537,6 +6814,94 @@ func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil +} + func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { 
size := m.Size() dAtA = make([]byte, size) @@ -6624,11 +6989,11 @@ func (m *PodList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n131, err := m.ListMeta.MarshalTo(dAtA[i:]) + n137, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n131 + i += n137 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -6688,11 +7053,11 @@ func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) - n132, err := m.SinceTime.MarshalTo(dAtA[i:]) + n138, err := m.SinceTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n132 + i += n138 } dAtA[i] = 0x30 i++ @@ -6781,11 +7146,11 @@ func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n133, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n139, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n133 + i += n139 } if m.RunAsUser != nil { dAtA[i] = 0x10 @@ -6836,11 +7201,11 @@ func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) - n134, err := m.PodController.MarshalTo(dAtA[i:]) + n140, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n134 + i += n140 } return i, nil } @@ -6964,11 +7329,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecurityContext.Size())) - n135, err := m.SecurityContext.MarshalTo(dAtA[i:]) + n141, err := m.SecurityContext.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n135 + i += n141 } if len(m.ImagePullSecrets) > 0 { for _, msg := range m.ImagePullSecrets { @@ -7000,11 +7365,11 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) - n136, err := m.Affinity.MarshalTo(dAtA[i:]) + n142, err := m.Affinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n136 + i += n142 } dAtA[i] = 0x9a i++ @@ -7079,6 +7444,18 @@ func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) } + if m.DNSConfig != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) + n143, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + } return i, nil } @@ -7133,11 +7510,11 @@ func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n137, err := m.StartTime.MarshalTo(dAtA[i:]) + n144, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n137 + i += n144 } if len(m.ContainerStatuses) > 0 { for _, msg := range m.ContainerStatuses { @@ -7188,19 +7565,19 @@ func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n138, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n145, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n138 + i += n145 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n139, err := m.Status.MarshalTo(dAtA[i:]) + n146, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n139 + i += n146 return 
i, nil } @@ -7222,19 +7599,19 @@ func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n140, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n147, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n140 + i += n147 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n141, err := m.Template.MarshalTo(dAtA[i:]) + n148, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n141 + i += n148 return i, nil } @@ -7256,11 +7633,11 @@ func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n142, err := m.ListMeta.MarshalTo(dAtA[i:]) + n149, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n142 + i += n149 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7294,19 +7671,19 @@ func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n143, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n150, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n143 + i += n150 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n144, err := m.Spec.MarshalTo(dAtA[i:]) + n151, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n144 + i += n151 return i, nil } @@ -7386,19 +7763,19 @@ func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) - n145, err := m.PodSignature.MarshalTo(dAtA[i:]) + n152, err := m.PodSignature.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n145 + i += n152 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) - n146, err := m.EvictionTime.MarshalTo(dAtA[i:]) + n153, err := m.EvictionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n146 + i += n153 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7431,11 +7808,11 @@ func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) - n147, err := m.Preference.MarshalTo(dAtA[i:]) + n154, err := m.Preference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n147 + i += n154 return i, nil } @@ -7457,11 +7834,11 @@ func (m *Probe) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) - n148, err := m.Handler.MarshalTo(dAtA[i:]) + n155, err := m.Handler.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n148 + i += n155 dAtA[i] = 0x10 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) @@ -7611,11 +7988,11 @@ func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n149, err := m.SecretRef.MarshalTo(dAtA[i:]) + n156, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n149 + i += n156 } dAtA[i] = 0x40 i++ @@ -7682,11 +8059,11 @@ func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x3a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n150, err := m.SecretRef.MarshalTo(dAtA[i:]) + n157, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n150 
+ i += n157 } dAtA[i] = 0x40 i++ @@ -7717,11 +8094,11 @@ func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n151, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n151 + i += n158 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) @@ -7753,27 +8130,27 @@ func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n152, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n152 + i += n159 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n153, err := m.Spec.MarshalTo(dAtA[i:]) + n160, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n153 + i += n160 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n154, err := m.Status.MarshalTo(dAtA[i:]) + n161, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n154 + i += n161 return i, nil } @@ -7803,11 +8180,11 @@ func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n155, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n162, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n155 + i += n162 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -7837,11 +8214,11 @@ func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n156, err := m.ListMeta.MarshalTo(dAtA[i:]) + n163, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n156 + i += n163 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -7903,11 +8280,11 @@ func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n157, err := m.Template.MarshalTo(dAtA[i:]) + n164, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n157 + i += n164 } dAtA[i] = 0x20 i++ @@ -7986,11 +8363,11 @@ func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) - n158, err := m.Divisor.MarshalTo(dAtA[i:]) + n165, err := m.Divisor.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n158 + i += n165 return i, nil } @@ -8012,27 +8389,27 @@ func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n159, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n159 + i += n166 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n160, err := m.Spec.MarshalTo(dAtA[i:]) + n167, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n160 + i += n167 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n161, err := m.Status.MarshalTo(dAtA[i:]) + n168, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n161 + i += n168 return i, nil } @@ -8054,11 +8431,11 @@ func (m 
*ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n162, err := m.ListMeta.MarshalTo(dAtA[i:]) + n169, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n162 + i += n169 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8113,11 +8490,11 @@ func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n163, err := (&v).MarshalTo(dAtA[i:]) + n170, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n163 + i += n170 } } if len(m.Scopes) > 0 { @@ -8177,11 +8554,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n164, err := (&v).MarshalTo(dAtA[i:]) + n171, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n164 + i += n171 } } if len(m.Used) > 0 { @@ -8208,11 +8585,11 @@ func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n165, err := (&v).MarshalTo(dAtA[i:]) + n172, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n165 + i += n172 } } return i, nil @@ -8257,11 +8634,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n166, err := (&v).MarshalTo(dAtA[i:]) + n173, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n166 + i += n173 } } if len(m.Requests) > 0 { @@ -8288,11 +8665,11 @@ func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n167, err := (&v).MarshalTo(dAtA[i:]) + n174, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n167 + i += n174 } } return i, nil @@ -8359,11 +8736,11 @@ func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n168, err := m.SecretRef.MarshalTo(dAtA[i:]) + n175, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n168 + i += n175 } dAtA[i] = 0x20 i++ @@ -8431,11 +8808,11 @@ func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n169, err := m.SecretRef.MarshalTo(dAtA[i:]) + n176, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n169 + i += n176 } dAtA[i] = 0x20 i++ @@ -8494,11 +8871,11 @@ func (m *Secret) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n170, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n177, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n170 + i += n177 if len(m.Data) > 0 { keysForData := make([]string, 0, len(m.Data)) for k := range m.Data { @@ -8574,11 +8951,11 @@ func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n171, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n178, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n171 + i += n178 if m.Optional != nil { dAtA[i] = 0x10 i++ @@ -8610,11 +8987,11 @@ func (m *SecretKeySelector) MarshalTo(dAtA 
[]byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n172, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n179, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n172 + i += n179 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) @@ -8650,11 +9027,11 @@ func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n173, err := m.ListMeta.MarshalTo(dAtA[i:]) + n180, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n173 + i += n180 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8688,11 +9065,11 @@ func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n174, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + n181, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n174 + i += n181 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -8812,11 +9189,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n175, err := m.Capabilities.MarshalTo(dAtA[i:]) + n182, err := m.Capabilities.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n175 + i += n182 } if m.Privileged != nil { dAtA[i] = 0x10 @@ -8832,11 +9209,11 @@ func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n176, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n183, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n176 + i += n183 } if m.RunAsUser != nil { dAtA[i] = 0x20 @@ -8894,11 +9271,11 @@ func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n177, err := m.Reference.MarshalTo(dAtA[i:]) + n184, err := m.Reference.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n177 + i += n184 return i, nil } @@ -8920,27 +9297,27 @@ func (m *Service) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n178, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n185, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n178 + i += n185 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n179, err := m.Spec.MarshalTo(dAtA[i:]) + n186, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n179 + i += n186 dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n180, err := m.Status.MarshalTo(dAtA[i:]) + n187, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n180 + i += n187 return i, nil } @@ -8962,11 +9339,11 @@ func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n181, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n188, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n181 + i += n188 if len(m.Secrets) > 0 { for _, msg := range m.Secrets { dAtA[i] = 0x12 @@ -9022,11 +9399,11 @@ func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.ListMeta.Size())) - n182, err := m.ListMeta.MarshalTo(dAtA[i:]) + n189, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n182 + i += n189 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9060,11 +9437,11 @@ func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n183, err := m.ListMeta.MarshalTo(dAtA[i:]) + n190, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n183 + i += n190 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -9109,11 +9486,11 @@ func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n184, err := m.TargetPort.MarshalTo(dAtA[i:]) + n191, err := m.TargetPort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n184 + i += n191 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) @@ -9260,11 +9637,11 @@ func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x72 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n185, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) + n192, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n185 + i += n192 } return i, nil } @@ -9287,11 +9664,11 @@ func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n186, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n193, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n186 + i += n193 return i, nil } @@ -9314,11 +9691,11 @@ func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n187, err := m.ClientIP.MarshalTo(dAtA[i:]) + n194, err := m.ClientIP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n187 + i += n194 } return i, nil } @@ -9362,11 +9739,11 @@ func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n188, err := m.SecretRef.MarshalTo(dAtA[i:]) + n195, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n188 + i += n195 } return i, nil } @@ -9410,11 +9787,11 @@ func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x2a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n189, err := m.SecretRef.MarshalTo(dAtA[i:]) + n196, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n189 + i += n196 } return i, nil } @@ -9463,11 +9840,11 @@ func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n190, err := m.Port.MarshalTo(dAtA[i:]) + n197, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n190 + i += n197 dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) @@ -9506,11 +9883,11 @@ func (m *Taint) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n191, err := m.TimeAdded.MarshalTo(dAtA[i:]) + n198, err := m.TimeAdded.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n191 + i += n198 } return i, nil } @@ -9576,11 +9953,37 @@ func (m *Volume) MarshalTo(dAtA []byte) (int, error) { 
dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n192, err := m.VolumeSource.MarshalTo(dAtA[i:]) + n199, err := m.VolumeSource.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n192 + i += n199 + return i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) return i, nil } @@ -9647,31 +10050,31 @@ func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n193, err := m.Secret.MarshalTo(dAtA[i:]) + n200, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n193 + i += n200 } if m.DownwardAPI != nil { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n194, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n201, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n194 + i += n201 } if m.ConfigMap != nil { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n195, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n202, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n195 + i += n202 } return i, nil } @@ -9695,163 +10098,163 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n196, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n196 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n197, err := m.EmptyDir.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n197 - } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n198, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n198 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n199, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n199 - } - if m.GitRepo != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n200, err := m.GitRepo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n200 - } - if m.Secret != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n201, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n201 - } - if m.NFS != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n202, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n202 - } - if m.ISCSI != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n203, err := m.ISCSI.MarshalTo(dAtA[i:]) + n203, err := m.HostPath.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n203 } - if m.Glusterfs != nil { - dAtA[i] = 0x4a + if m.EmptyDir != 
nil { + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n204, err := m.Glusterfs.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n204, err := m.EmptyDir.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n204 } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n205, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n205, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n205 } - if m.RBD != nil { - dAtA[i] = 0x5a + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n206, err := m.RBD.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n206, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n206 } - if m.FlexVolume != nil { - dAtA[i] = 0x62 + if m.GitRepo != nil { + dAtA[i] = 0x2a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n207, err := m.FlexVolume.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n207, err := m.GitRepo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n207 } - if m.Cinder != nil { - dAtA[i] = 0x6a + if m.Secret != nil { + dAtA[i] = 0x32 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n208, err := m.Cinder.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n208, err := m.Secret.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n208 } - if m.CephFS != nil { - dAtA[i] = 0x72 + if m.NFS != nil { + dAtA[i] = 0x3a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n209, err := m.CephFS.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n209, err := m.NFS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n209 } - if m.Flocker != nil { - dAtA[i] = 0x7a + if m.ISCSI != nil { + dAtA[i] = 0x42 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n210, err := m.Flocker.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n210, err := m.ISCSI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n210 } + if m.Glusterfs != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n211, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n211 + } + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n212, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n212 + } + if m.RBD != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n213, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n213 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n214, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n214 + } + if m.Cinder != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n215, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n215 + } + if m.CephFS != 
nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n216, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n216 + } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n217, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n217 + } if m.DownwardAPI != nil { dAtA[i] = 0x82 i++ dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + n218, err := m.DownwardAPI.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n211 + i += n218 } if m.FC != nil { dAtA[i] = 0x8a @@ -9859,11 +10262,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n212, err := m.FC.MarshalTo(dAtA[i:]) + n219, err := m.FC.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n212 + i += n219 } if m.AzureFile != nil { dAtA[i] = 0x92 @@ -9871,11 +10274,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n213, err := m.AzureFile.MarshalTo(dAtA[i:]) + n220, err := m.AzureFile.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n213 + i += n220 } if m.ConfigMap != nil { dAtA[i] = 0x9a @@ -9883,11 +10286,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n214, err := m.ConfigMap.MarshalTo(dAtA[i:]) + n221, err := m.ConfigMap.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n214 + i += n221 } if m.VsphereVolume != nil { dAtA[i] = 0xa2 @@ -9895,11 +10298,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n215, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + n222, err := m.VsphereVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n215 + i += n222 } if m.Quobyte != nil { dAtA[i] = 0xaa @@ -9907,11 +10310,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n216, err := m.Quobyte.MarshalTo(dAtA[i:]) + n223, err := m.Quobyte.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n216 + i += n223 } if m.AzureDisk != nil { dAtA[i] = 0xb2 @@ -9919,11 +10322,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n217, err := m.AzureDisk.MarshalTo(dAtA[i:]) + n224, err := m.AzureDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n217 + i += n224 } if m.PhotonPersistentDisk != nil { dAtA[i] = 0xba @@ -9931,11 +10334,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n218, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + n225, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n218 + i += n225 } if m.PortworxVolume != nil { dAtA[i] = 0xc2 @@ -9943,11 +10346,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n219, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + n226, err := m.PortworxVolume.MarshalTo(dAtA[i:]) if err != nil { return 
0, err } - i += n219 + i += n226 } if m.ScaleIO != nil { dAtA[i] = 0xca @@ -9955,11 +10358,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n220, err := m.ScaleIO.MarshalTo(dAtA[i:]) + n227, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n220 + i += n227 } if m.Projected != nil { dAtA[i] = 0xd2 @@ -9967,11 +10370,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n221, err := m.Projected.MarshalTo(dAtA[i:]) + n228, err := m.Projected.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n221 + i += n228 } if m.StorageOS != nil { dAtA[i] = 0xda @@ -9979,11 +10382,11 @@ func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n222, err := m.StorageOS.MarshalTo(dAtA[i:]) + n229, err := m.StorageOS.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n222 + i += n229 } return i, nil } @@ -10043,11 +10446,11 @@ func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n223, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) + n230, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n223 + i += n230 return i, nil } @@ -10191,6 +10594,17 @@ func (m *Binding) Size() (n int) { return n } +func (m *CSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + func (m *Capabilities) Size() (n int) { var l int _ = l @@ -10480,6 +10894,12 @@ func (m *Container) Size() (n int) { } l = len(m.TerminationMessagePolicy) n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -10822,6 +11242,22 @@ func (m *Event) Size() (n int) { n += 1 + sovGenerated(uint64(m.Count)) l = len(m.Type) n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) return n } @@ -10839,6 +11275,17 @@ func (m *EventList) Size() (n int) { return n } +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *EventSource) Size() (n int) { var l int _ = l @@ -11027,6 +11474,38 @@ func (m *HostPathVolumeSource) Size() (n int) { return n } +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if 
len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *ISCSIVolumeSource) Size() (n int) { var l int _ = l @@ -11730,6 +12209,10 @@ func (m *PersistentVolumeClaimSpec) Size() (n int) { l = len(*m.StorageClassName) n += 1 + l + sovGenerated(uint64(l)) } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -11872,6 +12355,10 @@ func (m *PersistentVolumeSource) Size() (n int) { l = m.StorageOS.Size() n += 2 + l + sovGenerated(uint64(l)) } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -11909,6 +12396,10 @@ func (m *PersistentVolumeSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -12030,6 +12521,42 @@ func (m *PodCondition) Size() (n int) { return n } +func (m *PodDNSConfig) Size() (n int) { + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + func (m *PodExecOptions) Size() (n int) { var l int _ = l @@ -12227,6 +12754,10 @@ func (m *PodSpec) Size() (n int) { if m.Priority != nil { n += 2 + sovGenerated(uint64(*m.Priority)) } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } return n } @@ -13125,6 +13656,16 @@ func (m *Volume) Size() (n int) { return n } +func (m *VolumeDevice) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *VolumeMount) Size() (n int) { var l int _ = l @@ -13407,6 +13948,18 @@ func (this *Binding) String() string { }, "") return s } +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} func (this *Capabilities) String() string { if this == nil { return "nil" @@ -13610,6 +14163,7 @@ func (this *Container) String() string { `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -13883,6 +14437,12 @@ func (this *Event) String() string { `LastTimestamp:` + 
strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `}`, }, "") return s @@ -13898,6 +14458,18 @@ func (this *EventList) String() string { }, "") return s } +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} func (this *EventSource) String() string { if this == nil { return "nil" @@ -14064,6 +14636,26 @@ func (this *HostPathVolumeSource) String() string { }, "") return s } +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} func (this *ISCSIVolumeSource) String() string { if this == nil { return "nil" @@ -14673,6 +15265,7 @@ func (this *PersistentVolumeClaimSpec) String() string { `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -14733,7 +15326,7 @@ func (this *PersistentVolumeSource) String() string { `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), 
"ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, @@ -14748,6 +15341,7 @@ func (this *PersistentVolumeSource) String() string { `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, `}`, }, "") return s @@ -14774,6 +15368,7 @@ func (this *PersistentVolumeSpec) String() string { `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, `}`, }, "") return s @@ -14876,6 +15471,29 @@ func (this *PodCondition) String() string { }, "") return s } +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} func (this *PodExecOptions) String() string { if this == nil { return "nil" @@ -15003,6 +15621,7 @@ func (this *PodSpec) String() string { `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, `}`, }, "") return s @@ -15786,6 +16405,17 @@ func (this *Volume) String() string { }, "") return s } +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} func (this *VolumeMount) String() string { if this == nil { return "nil" @@ -16980,6 +17610,134 @@ func (m *Binding) Unmarshal(dAtA []byte) error { } return nil } +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -19541,6 +20299,37 @@ func (m *Container) Unmarshal(dAtA []byte) error { } m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22770,6 +23559,189 @@ func (m *Event) Unmarshal(dAtA []byte) error { 
} m.Type = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -22902,6 +23874,134 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } return nil } +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *EventSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -24735,6 +25835,343 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } return nil } +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx 
< l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + 
if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ISCSIVolumeSource) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -31838,6 +33275,36 @@ func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { s := string(dAtA[iNdEx:postIndex]) m.StorageClassName = &s iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -32583,7 +34050,7 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} + m.ISCSI = &ISCSIPersistentVolumeSource{} } if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -33051,6 +34518,39 @@ func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIPersistentVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33401,6 +34901,36 @@ func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { } m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34557,6 +36087,254 @@ func (m *PodCondition) Unmarshal(dAtA []byte) error { } return nil } +func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType 
:= int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, PodDNSConfigOption{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *PodExecOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -36367,6 +38145,39 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } } m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -45543,6 +47354,114 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } return nil } +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *VolumeMount) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -47195,749 +49114,777 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 11903 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x70, 0x24, 0xc7, - 0x79, 0x98, 0x66, 0x17, 0xaf, 0xfd, 0xf0, 0xee, 0xc3, 0x91, 0x7b, 0x20, 0xef, 0x70, 0x1c, 0x4a, - 0xe4, 0xf1, 0x05, 0x88, 0x47, 0x52, 0xa4, 0x45, 0x8a, 0x36, 0x80, 0x05, 0xee, 0xc0, 0x3b, 0xdc, - 0x2d, 0x7b, 0x71, 0x77, 0x22, 0x4d, 0x33, 0x1a, 0xec, 0x36, 0x16, 0x43, 0x0c, 0x66, 0x96, 0x33, - 0xb3, 0xb8, 0x03, 0xcb, 0xaa, 0x4a, 0x14, 0x59, 0x79, 0xc8, 0x3f, 0x54, 0x89, 0x2a, 0x71, 0x2c, - 0x95, 0x53, 0x95, 0x47, 0xd9, 0x8a, 0x93, 0x54, 0x1c, 0x39, 0xb2, 0x23, 0x39, 0x15, 0xc7, 0x79, - 0x94, 0xfc, 0x47, 0xb1, 0xf3, 0x47, 0xaa, 0x72, 0x05, 0xb1, 0x4e, 0xa9, 0xa4, 0xf2, 0x23, 0xa9, - 0x24, 0xfe, 0x65, 0xc4, 0x89, 0x52, 0xfd, 0x9c, 0xee, 0xd9, 0x99, 0xdd, 0xc5, 0x11, 0x07, 0x52, - 0x2a, 0xfd, 0xdb, 0xed, 0xef, 0xeb, 0xaf, 0x7b, 0xfa, 0xf1, 0xf5, 0xd7, 0x5f, 0x7f, 0x0f, 0x78, - 0x79, 0xe7, 0xa5, 0x68, 0xde, 0x0d, 0x16, 0x76, 0xda, 0x9b, 0x24, 0xf4, 0x49, 0x4c, 0xa2, 0x85, - 0x3d, 0xe2, 0x37, 0x82, 0x70, 0x41, 0x00, 0x9c, 0x96, 0xbb, 0x50, 0x0f, 0x42, 0xb2, 0xb0, 0xf7, - 0xec, 0x42, 0x93, 0xf8, 0x24, 0x74, 0x62, 0xd2, 0x98, 0x6f, 0x85, 0x41, 0x1c, 0x20, 0xc4, 0x71, - 0xe6, 0x9d, 0x96, 0x3b, 0x4f, 0x71, 0xe6, 0xf7, 0x9e, 0x9d, 0x7d, 0xa6, 0xe9, 0xc6, 0xdb, 0xed, - 0xcd, 0xf9, 0x7a, 0xb0, 0xbb, 0xd0, 0x0c, 0x9a, 0xc1, 0x02, 0x43, 0xdd, 0x6c, 0x6f, 0xb1, 0x7f, - 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xae, 0x27, 0xcd, 0x90, 0x3b, 0x31, 0xf1, 0x23, 0x37, 0xf0, - 0xa3, 0x67, 0x9c, 0x96, 0x1b, 0x91, 0x70, 0x8f, 0x84, 0x0b, 0xad, 0x9d, 0x26, 0x85, 0x45, 0x26, - 0xc2, 0xc2, 0xde, 0xb3, 0x9b, 0x24, 0x76, 0x3a, 0x7a, 0x34, 0xfb, 0x7c, 0x42, 0x6e, 0xd7, 0xa9, - 0x6f, 0xbb, 0x3e, 0x09, 0xf7, 0x25, 0x8d, 0x85, 0x90, 0x44, 0x41, 0x3b, 0xac, 0x93, 0x23, 0xd5, - 0x8a, 0x16, 0x76, 0x49, 0xec, 0x64, 0x7c, 0xfd, 0xec, 0x42, 0x5e, 0xad, 0xb0, 0xed, 0xc7, 0xee, - 0x6e, 0x67, 0x33, 0x9f, 0xe8, 0x55, 0x21, 0xaa, 0x6f, 0x93, 0x5d, 0xa7, 0xa3, 0xde, 0x73, 0x79, - 0xf5, 0xda, 0xb1, 0xeb, 0x2d, 0xb8, 0x7e, 0x1c, 0xc5, 0x61, 0xba, 0x92, 0xfd, 0x5d, 0x0b, 0xce, - 0x2f, 0xde, 0xaa, 0xad, 0x78, 0x4e, 0x14, 0xbb, 0xf5, 0x25, 0x2f, 0xa8, 0xef, 0xd4, 0xe2, 0x20, - 0x24, 0x37, 0x03, 0xaf, 0xbd, 0x4b, 0x6a, 0x6c, 0x20, 0xd0, 0xd3, 0x30, 0xb2, 0xc7, 0xfe, 
-	// (remainder of the previous gzipped fileDescriptor byte array, removed by this patch)
+	// 12346 bytes of a gzipped FileDescriptorProto
+	// (regenerated gzipped fileDescriptor byte array added by this patch)
0x10, 0xe7, 0xab, 0xe9, 0xce, 0x99, 0x29, 0x4d, + 0x4d, 0x55, 0xab, 0xaa, 0x7a, 0x76, 0x47, 0x01, 0x11, 0xf6, 0x19, 0xf0, 0x07, 0xfc, 0x20, 0x6c, + 0xc2, 0xc6, 0x40, 0xe0, 0x08, 0x1b, 0x07, 0x9c, 0xb1, 0x1d, 0xc6, 0x60, 0xc0, 0x80, 0x6d, 0x8c, + 0x1d, 0x0e, 0xf8, 0x83, 0xc1, 0xfe, 0x71, 0x44, 0x10, 0x1e, 0xc3, 0x40, 0xd8, 0xc1, 0x0f, 0x3b, + 0x6c, 0xf3, 0x8b, 0x31, 0x36, 0x8e, 0xfc, 0xac, 0xcc, 0xea, 0xaa, 0xee, 0x9e, 0xd5, 0xec, 0x48, + 0x10, 0xf7, 0xaf, 0x3b, 0xdf, 0xcb, 0x97, 0x59, 0xf9, 0xf1, 0xf2, 0xe5, 0xcb, 0xf7, 0x01, 0x6f, + 0xee, 0x5e, 0x8b, 0xe6, 0xdd, 0x60, 0x61, 0xb7, 0xb3, 0x49, 0x42, 0x9f, 0xc4, 0x24, 0x5a, 0xd8, + 0x27, 0x7e, 0x2b, 0x08, 0x17, 0x04, 0xc0, 0x69, 0xbb, 0x0b, 0xcd, 0x20, 0x24, 0x0b, 0xfb, 0xaf, + 0x2c, 0x6c, 0x13, 0x9f, 0x84, 0x4e, 0x4c, 0x5a, 0xf3, 0xed, 0x30, 0x88, 0x03, 0x84, 0x38, 0xce, + 0xbc, 0xd3, 0x76, 0xe7, 0x29, 0xce, 0xfc, 0xfe, 0x2b, 0x73, 0x2f, 0x6f, 0xbb, 0xf1, 0x4e, 0x67, + 0x73, 0xbe, 0x19, 0xec, 0x2d, 0x6c, 0x07, 0xdb, 0xc1, 0x02, 0x43, 0xdd, 0xec, 0x6c, 0xb1, 0x7f, + 0xec, 0x0f, 0xfb, 0xc5, 0x49, 0xcc, 0xad, 0x25, 0xcd, 0x90, 0x07, 0x31, 0xf1, 0x23, 0x37, 0xf0, + 0xa3, 0x97, 0x9d, 0xb6, 0x1b, 0x91, 0x70, 0x9f, 0x84, 0x0b, 0xed, 0xdd, 0x6d, 0x0a, 0x8b, 0x4c, + 0x84, 0x85, 0xfd, 0x57, 0x36, 0x49, 0xec, 0x74, 0xf5, 0x68, 0xee, 0xb5, 0x84, 0xdc, 0x9e, 0xd3, + 0xdc, 0x71, 0x7d, 0x12, 0x1e, 0x48, 0x1a, 0x0b, 0x21, 0x89, 0x82, 0x4e, 0xd8, 0x24, 0x27, 0xaa, + 0x15, 0x2d, 0xec, 0x91, 0xd8, 0xc9, 0xf8, 0xfa, 0xb9, 0x85, 0xbc, 0x5a, 0x61, 0xc7, 0x8f, 0xdd, + 0xbd, 0xee, 0x66, 0x3e, 0xd7, 0xaf, 0x42, 0xd4, 0xdc, 0x21, 0x7b, 0x4e, 0x57, 0xbd, 0x57, 0xf3, + 0xea, 0x75, 0x62, 0xd7, 0x5b, 0x70, 0xfd, 0x38, 0x8a, 0xc3, 0x74, 0x25, 0xfb, 0x1b, 0x16, 0x5c, + 0x5e, 0xbc, 0xd7, 0x58, 0xf1, 0x9c, 0x28, 0x76, 0x9b, 0x4b, 0x5e, 0xd0, 0xdc, 0x6d, 0xc4, 0x41, + 0x48, 0xee, 0x06, 0x5e, 0x67, 0x8f, 0x34, 0xd8, 0x40, 0xa0, 0x97, 0x60, 0x6c, 0x9f, 0xfd, 0xaf, + 0x55, 0xcb, 0xd6, 0x65, 0xeb, 0x4a, 0x69, 0x69, 0xe6, 0xd7, 0x0e, 0x2b, 0x9f, 0x3a, 0x3a, 0xac, + 0x8c, 0xdd, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x67, 0x61, 0x64, 0x2b, 0xda, 0x38, 0x68, 0x93, 0x72, + 0x81, 0xe1, 0x4e, 0x09, 0xdc, 0x91, 0xd5, 0x06, 0x2d, 0xc5, 0x02, 0x8a, 0x16, 0xa0, 0xd4, 0x76, + 0xc2, 0xd8, 0x8d, 0xdd, 0xc0, 0x2f, 0x17, 0x2f, 0x5b, 0x57, 0x86, 0x97, 0x66, 0x05, 0x6a, 0xa9, + 0x2e, 0x01, 0x38, 0xc1, 0xa1, 0xdd, 0x08, 0x89, 0xd3, 0xba, 0xed, 0x7b, 0x07, 0xe5, 0xa1, 0xcb, + 0xd6, 0x95, 0xb1, 0xa4, 0x1b, 0x58, 0x94, 0x63, 0x85, 0x61, 0xff, 0x70, 0x01, 0xc6, 0x16, 0xb7, + 0xb6, 0x5c, 0xdf, 0x8d, 0x0f, 0xd0, 0x5d, 0x98, 0xf0, 0x83, 0x16, 0x91, 0xff, 0xd9, 0x57, 0x8c, + 0x5f, 0xbd, 0x3c, 0xdf, 0xbd, 0x32, 0xe7, 0xd7, 0x35, 0xbc, 0xa5, 0x99, 0xa3, 0xc3, 0xca, 0x84, + 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x78, 0x3b, 0x68, 0x29, 0xb2, 0x05, 0x46, 0xb6, 0x92, 0x45, + 0xb6, 0x9e, 0xa0, 0x2d, 0x4d, 0x1f, 0x1d, 0x56, 0xc6, 0xb5, 0x02, 0xac, 0x13, 0x41, 0x9b, 0x30, + 0x4d, 0xff, 0xfa, 0xb1, 0xab, 0xe8, 0x16, 0x19, 0xdd, 0x67, 0xf2, 0xe8, 0x6a, 0xa8, 0x4b, 0xe7, + 0x8e, 0x0e, 0x2b, 0xd3, 0xa9, 0x42, 0x9c, 0x26, 0x68, 0x7f, 0x08, 0x53, 0x8b, 0x71, 0xec, 0x34, + 0x77, 0x48, 0x8b, 0xcf, 0x20, 0x7a, 0x0d, 0x86, 0x7c, 0x67, 0x8f, 0x88, 0xf9, 0xbd, 0x2c, 0x06, + 0x76, 0x68, 0xdd, 0xd9, 0x23, 0xc7, 0x87, 0x95, 0x99, 0x3b, 0xbe, 0xfb, 0x41, 0x47, 0xac, 0x0a, + 0x5a, 0x86, 0x19, 0x36, 0xba, 0x0a, 0xd0, 0x22, 0xfb, 0x6e, 0x93, 0xd4, 0x9d, 0x78, 0x47, 0xcc, + 0x37, 0x12, 0x75, 0xa1, 0xaa, 0x20, 0x58, 0xc3, 0xb2, 0x1f, 0x40, 0x69, 0x71, 0x3f, 0x70, 0x5b, + 0xf5, 0xa0, 0x15, 0xa1, 0x5d, 0x98, 0x6e, 0x87, 0x64, 0x8b, 0x84, 0xaa, 
0xa8, 0x6c, 0x5d, 0x2e, + 0x5e, 0x19, 0xbf, 0x7a, 0x25, 0xf3, 0x63, 0x4d, 0xd4, 0x15, 0x3f, 0x0e, 0x0f, 0x96, 0x1e, 0x17, + 0xed, 0x4d, 0xa7, 0xa0, 0x38, 0x4d, 0xd9, 0xfe, 0x77, 0x05, 0xb8, 0xb0, 0xf8, 0x61, 0x27, 0x24, + 0x55, 0x37, 0xda, 0x4d, 0xaf, 0xf0, 0x96, 0x1b, 0xed, 0xae, 0x27, 0x23, 0xa0, 0x96, 0x56, 0x55, + 0x94, 0x63, 0x85, 0x81, 0x5e, 0x86, 0x51, 0xfa, 0xfb, 0x0e, 0xae, 0x89, 0x4f, 0x3e, 0x27, 0x90, + 0xc7, 0xab, 0x4e, 0xec, 0x54, 0x39, 0x08, 0x4b, 0x1c, 0xb4, 0x06, 0xe3, 0x4d, 0xb6, 0x21, 0xb7, + 0xd7, 0x82, 0x16, 0x61, 0x93, 0x59, 0x5a, 0x7a, 0x91, 0xa2, 0x2f, 0x27, 0xc5, 0xc7, 0x87, 0x95, + 0x32, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x0d, 0x31, 0x4a, 0x90, + 0xb1, 0xb7, 0xae, 0x68, 0x5b, 0x65, 0x98, 0x6d, 0x95, 0x89, 0xec, 0x6d, 0x82, 0x5e, 0x81, 0xa1, + 0x5d, 0xd7, 0x6f, 0x95, 0x47, 0x18, 0xad, 0xa7, 0xe8, 0x9c, 0xdf, 0x74, 0xfd, 0xd6, 0xf1, 0x61, + 0x65, 0xd6, 0xe8, 0x0e, 0x2d, 0xc4, 0x0c, 0xd5, 0xfe, 0x23, 0x0b, 0x2a, 0x0c, 0xb6, 0xea, 0x7a, + 0xa4, 0x4e, 0xc2, 0xc8, 0x8d, 0x62, 0xe2, 0xc7, 0xc6, 0x80, 0x5e, 0x05, 0x88, 0x48, 0x33, 0x24, + 0xb1, 0x36, 0xa4, 0x6a, 0x61, 0x34, 0x14, 0x04, 0x6b, 0x58, 0x94, 0x21, 0x44, 0x3b, 0x4e, 0xc8, + 0xd6, 0x97, 0x18, 0x58, 0xc5, 0x10, 0x1a, 0x12, 0x80, 0x13, 0x1c, 0x83, 0x21, 0x14, 0xfb, 0x31, + 0x04, 0xf4, 0x45, 0x98, 0x4e, 0x1a, 0x8b, 0xda, 0x4e, 0x53, 0x0e, 0x20, 0xdb, 0x32, 0x0d, 0x13, + 0x84, 0xd3, 0xb8, 0xf6, 0x3f, 0xb4, 0xc4, 0xe2, 0xa1, 0x5f, 0xfd, 0x09, 0xff, 0x56, 0xfb, 0x17, + 0x2c, 0x18, 0x5d, 0x72, 0xfd, 0x96, 0xeb, 0x6f, 0xa3, 0xaf, 0xc2, 0x18, 0x3d, 0x9b, 0x5a, 0x4e, + 0xec, 0x08, 0xbe, 0xf7, 0x59, 0x6d, 0x6f, 0xa9, 0xa3, 0x62, 0xbe, 0xbd, 0xbb, 0x4d, 0x0b, 0xa2, + 0x79, 0x8a, 0x4d, 0x77, 0xdb, 0xed, 0xcd, 0xf7, 0x49, 0x33, 0x5e, 0x23, 0xb1, 0x93, 0x7c, 0x4e, + 0x52, 0x86, 0x15, 0x55, 0x74, 0x13, 0x46, 0x62, 0x27, 0xdc, 0x26, 0xb1, 0x60, 0x80, 0x99, 0x8c, + 0x8a, 0xd7, 0xc4, 0x74, 0x47, 0x12, 0xbf, 0x49, 0x92, 0x63, 0x61, 0x83, 0x55, 0xc5, 0x82, 0x84, + 0xfd, 0x53, 0x16, 0x5c, 0x5c, 0x6e, 0xd4, 0x72, 0xd6, 0xd5, 0xb3, 0x30, 0xd2, 0x0a, 0xdd, 0x7d, + 0x12, 0x8a, 0x71, 0x56, 0x54, 0xaa, 0xac, 0x14, 0x0b, 0x28, 0xba, 0x06, 0x13, 0xfc, 0x40, 0xba, + 0xe1, 0xf8, 0x2d, 0x4f, 0x0e, 0xf1, 0x79, 0x81, 0x3d, 0x71, 0x57, 0x83, 0x61, 0x03, 0xf3, 0x84, + 0x03, 0xdd, 0x84, 0x89, 0x65, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0x42, 0xcf, 0x41, + 0xd1, 0x69, 0xb5, 0x18, 0x0f, 0x2b, 0x2d, 0x5d, 0x38, 0x3a, 0xac, 0x14, 0x17, 0x5b, 0x74, 0x33, + 0x81, 0xc2, 0x3a, 0xc0, 0x14, 0x03, 0xbd, 0x00, 0x43, 0xad, 0x30, 0x68, 0x97, 0x0b, 0x0c, 0xf3, + 0x31, 0xba, 0xef, 0xaa, 0x61, 0xd0, 0x4e, 0xa1, 0x32, 0x1c, 0xfb, 0x57, 0x0a, 0xf0, 0xe4, 0x32, + 0x69, 0xef, 0xac, 0x36, 0x72, 0x46, 0xe5, 0x0a, 0x8c, 0xed, 0x05, 0xbe, 0x1b, 0x07, 0x61, 0x24, + 0x9a, 0x66, 0xdb, 0x7d, 0x4d, 0x94, 0x61, 0x05, 0x45, 0x97, 0x61, 0xa8, 0x9d, 0xb0, 0xea, 0x09, + 0xc9, 0xe6, 0x19, 0x93, 0x66, 0x10, 0x8a, 0xd1, 0x89, 0x48, 0x28, 0xd8, 0x94, 0xc2, 0xb8, 0x13, + 0x91, 0x10, 0x33, 0x48, 0xb2, 0xde, 0xe9, 0x4e, 0x10, 0x7b, 0x28, 0xb5, 0xde, 0x29, 0x04, 0x6b, + 0x58, 0xa8, 0x0e, 0x25, 0xfe, 0x0f, 0x93, 0x2d, 0xc6, 0x91, 0x72, 0x56, 0x49, 0x43, 0x22, 0x89, + 0x55, 0x32, 0xc9, 0x36, 0x84, 0x2c, 0xc4, 0x09, 0x11, 0x63, 0x9e, 0x46, 0xfa, 0xce, 0xd3, 0x2f, + 0x15, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, 0x07, 0x2e, 0xf3, 0x68, 0xbc, 0x15, + 0x34, 0x1d, 0x2f, 0xbd, 0xc7, 0x4e, 0x6b, 0xf4, 0x7e, 0xc8, 0x02, 0xb4, 0xec, 0xfa, 0x2d, 0x12, + 0x9e, 0x81, 0x5c, 0x78, 0xb2, 0x0d, 0x78, 0x0b, 0xa6, 0x96, 0x3d, 0x97, 0xf8, 0x71, 0xad, 0xbe, + 
0x1c, 0xf8, 0x5b, 0xee, 0x36, 0xfa, 0x3c, 0x4c, 0x51, 0x31, 0x39, 0xe8, 0xc4, 0x0d, 0xd2, 0x0c, + 0x7c, 0x26, 0x51, 0x50, 0xe1, 0x12, 0x1d, 0x1d, 0x56, 0xa6, 0x36, 0x0c, 0x08, 0x4e, 0x61, 0xda, + 0xbf, 0x43, 0x3f, 0x34, 0xd8, 0x6b, 0x07, 0x3e, 0xf1, 0xe3, 0xe5, 0xc0, 0x6f, 0x71, 0xc9, 0xf3, + 0xf3, 0x30, 0x14, 0xd3, 0x8e, 0xf3, 0x8f, 0x7c, 0x56, 0x4e, 0x2d, 0xed, 0xee, 0xf1, 0x61, 0xe5, + 0xb1, 0xee, 0x1a, 0xec, 0x83, 0x58, 0x1d, 0xf4, 0x2d, 0x30, 0x12, 0xc5, 0x4e, 0xdc, 0x89, 0xc4, + 0x67, 0x3f, 0x2d, 0x3f, 0xbb, 0xc1, 0x4a, 0x8f, 0x0f, 0x2b, 0xd3, 0xaa, 0x1a, 0x2f, 0xc2, 0xa2, + 0x02, 0x7a, 0x1e, 0x46, 0xf7, 0x48, 0x14, 0x39, 0xdb, 0x52, 0x68, 0x98, 0x16, 0x75, 0x47, 0xd7, + 0x78, 0x31, 0x96, 0x70, 0xf4, 0x0c, 0x0c, 0x93, 0x30, 0x0c, 0x42, 0xb1, 0xaa, 0x26, 0x05, 0xe2, + 0xf0, 0x0a, 0x2d, 0xc4, 0x1c, 0x66, 0xff, 0x07, 0x0b, 0xa6, 0x55, 0x5f, 0x79, 0x5b, 0x67, 0x70, + 0x3a, 0xbc, 0x0b, 0xd0, 0x94, 0x1f, 0x18, 0x31, 0x7e, 0x37, 0x7e, 0xf5, 0xd9, 0xac, 0x25, 0xdc, + 0x3d, 0x8c, 0x09, 0x65, 0x55, 0x14, 0x61, 0x8d, 0x9a, 0xfd, 0x2f, 0x2d, 0x38, 0x97, 0xfa, 0xa2, + 0x5b, 0x6e, 0x14, 0xa3, 0xf7, 0xba, 0xbe, 0x6a, 0x7e, 0xb0, 0xaf, 0xa2, 0xb5, 0xd9, 0x37, 0xa9, + 0x35, 0x27, 0x4b, 0xb4, 0x2f, 0xba, 0x01, 0xc3, 0x6e, 0x4c, 0xf6, 0xe4, 0xc7, 0x3c, 0xd3, 0xf3, + 0x63, 0x78, 0xaf, 0x92, 0x19, 0xa9, 0xd1, 0x9a, 0x98, 0x13, 0xb0, 0xff, 0x97, 0x05, 0x25, 0xbe, + 0x6c, 0xd7, 0x9c, 0xf6, 0x19, 0xcc, 0x45, 0x0d, 0x86, 0x18, 0x75, 0xde, 0xf1, 0xe7, 0xb2, 0x3b, + 0x2e, 0xba, 0x33, 0x4f, 0x45, 0x3f, 0x2e, 0x62, 0x2b, 0x66, 0x46, 0x8b, 0x30, 0x23, 0x31, 0xf7, + 0x06, 0x94, 0x14, 0x02, 0x9a, 0x81, 0xe2, 0x2e, 0xe1, 0xd7, 0xaa, 0x12, 0xa6, 0x3f, 0xd1, 0x79, + 0x18, 0xde, 0x77, 0xbc, 0x8e, 0xd8, 0xec, 0x98, 0xff, 0xf9, 0x7c, 0xe1, 0x9a, 0x65, 0xff, 0x22, + 0xdb, 0x63, 0xa2, 0x91, 0x15, 0x7f, 0x5f, 0x30, 0x93, 0x0f, 0xe1, 0xbc, 0x97, 0xc1, 0xc3, 0xc4, + 0x40, 0x0c, 0xce, 0xf3, 0x9e, 0x14, 0x7d, 0x3d, 0x9f, 0x05, 0xc5, 0x99, 0x6d, 0xd0, 0x63, 0x20, + 0x68, 0xd3, 0x15, 0xe5, 0x78, 0xac, 0xbf, 0x42, 0x5c, 0xbe, 0x2d, 0xca, 0xb0, 0x82, 0x52, 0x06, + 0x71, 0x5e, 0x75, 0xfe, 0x26, 0x39, 0x68, 0x10, 0x8f, 0x34, 0xe3, 0x20, 0xfc, 0x58, 0xbb, 0xff, + 0x14, 0x1f, 0x7d, 0xce, 0x5f, 0xc6, 0x05, 0x81, 0xe2, 0x4d, 0x72, 0xc0, 0xa7, 0x42, 0xff, 0xba, + 0x62, 0xcf, 0xaf, 0xfb, 0x69, 0x0b, 0x26, 0xd5, 0xd7, 0x9d, 0xc1, 0x46, 0x5a, 0x32, 0x37, 0xd2, + 0x53, 0x3d, 0xd7, 0x63, 0xce, 0x16, 0xfa, 0x53, 0xc6, 0x02, 0x04, 0x4e, 0x3d, 0x0c, 0xe8, 0xd0, + 0x50, 0x9e, 0xfd, 0x71, 0x4e, 0xc8, 0x20, 0xdf, 0x75, 0x93, 0x1c, 0x6c, 0x04, 0x54, 0x7c, 0xc8, + 0xfe, 0x2e, 0x63, 0xd6, 0x86, 0x7a, 0xce, 0xda, 0xcf, 0x16, 0xe0, 0x82, 0x1a, 0x01, 0xe3, 0x80, + 0xfe, 0xb3, 0x3e, 0x06, 0xaf, 0xc0, 0x78, 0x8b, 0x6c, 0x39, 0x1d, 0x2f, 0x56, 0x37, 0xe7, 0x61, + 0xae, 0x3d, 0xa9, 0x26, 0xc5, 0x58, 0xc7, 0x39, 0xc1, 0xb0, 0xfd, 0xf8, 0x38, 0xe3, 0xbd, 0xb1, + 0x43, 0x57, 0x30, 0x95, 0xde, 0x34, 0xfd, 0xc7, 0x84, 0xae, 0xff, 0x10, 0xba, 0x8e, 0x67, 0x60, + 0xd8, 0xdd, 0xa3, 0x67, 0x71, 0xc1, 0x3c, 0x62, 0x6b, 0xb4, 0x10, 0x73, 0x18, 0xfa, 0x0c, 0x8c, + 0x36, 0x83, 0xbd, 0x3d, 0xc7, 0x6f, 0x95, 0x8b, 0x4c, 0x9e, 0x1c, 0xa7, 0xc7, 0xf5, 0x32, 0x2f, + 0xc2, 0x12, 0x86, 0x9e, 0x84, 0x21, 0x27, 0xdc, 0x8e, 0xca, 0x43, 0x0c, 0x67, 0x8c, 0xb6, 0xb4, + 0x18, 0x6e, 0x47, 0x98, 0x95, 0x52, 0x39, 0xf1, 0x7e, 0x10, 0xee, 0xba, 0xfe, 0x76, 0xd5, 0x0d, + 0x99, 0xd0, 0xa7, 0xc9, 0x89, 0xf7, 0x14, 0x04, 0x6b, 0x58, 0x68, 0x15, 0x86, 0xdb, 0x41, 0x18, + 0x47, 0xe5, 0x11, 0x36, 0xdc, 0x4f, 0xe7, 0x6c, 0x25, 0xfe, 0xb5, 0xf5, 0x20, 0x8c, 0x93, 0x0f, + 0xa0, 0xff, 0x22, 0xcc, 
0xab, 0xa3, 0x6f, 0x81, 0x22, 0xf1, 0xf7, 0xcb, 0xa3, 0x8c, 0xca, 0x5c, + 0x16, 0x95, 0x15, 0x7f, 0xff, 0xae, 0x13, 0x26, 0x7c, 0x66, 0xc5, 0xdf, 0xc7, 0xb4, 0x0e, 0xfa, + 0x32, 0x94, 0xa4, 0xee, 0x34, 0x2a, 0x8f, 0xe5, 0x2f, 0x31, 0x2c, 0x90, 0x30, 0xf9, 0xa0, 0xe3, + 0x86, 0x64, 0x8f, 0xf8, 0x71, 0x94, 0xdc, 0x7e, 0x25, 0x34, 0xc2, 0x09, 0x35, 0xf4, 0x65, 0x79, + 0x9d, 0x5b, 0x0b, 0x3a, 0x7e, 0x1c, 0x95, 0x4b, 0xac, 0x7b, 0x99, 0x8a, 0xb6, 0xbb, 0x09, 0x5e, + 0xfa, 0xbe, 0xc7, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xe9, 0xb9, 0xfb, 0xc4, 0x27, 0x51, 0x54, + 0x0f, 0x83, 0x4d, 0x52, 0x06, 0xd6, 0xf3, 0x8b, 0xd9, 0xfa, 0xa7, 0x60, 0x93, 0x2c, 0xcd, 0x1e, + 0x1d, 0x56, 0x26, 0x6f, 0xe9, 0x75, 0xb0, 0x49, 0x02, 0xdd, 0x81, 0x29, 0x2a, 0xa0, 0xba, 0x09, + 0xd1, 0xf1, 0x7e, 0x44, 0x99, 0x74, 0x8a, 0x8d, 0x4a, 0x38, 0x45, 0x04, 0xbd, 0x0d, 0x25, 0xcf, + 0xdd, 0x22, 0xcd, 0x83, 0xa6, 0x47, 0xca, 0x13, 0x8c, 0x62, 0xe6, 0xb6, 0xba, 0x25, 0x91, 0xf8, + 0x05, 0x40, 0xfd, 0xc5, 0x49, 0x75, 0x74, 0x17, 0x1e, 0x8b, 0x49, 0xb8, 0xe7, 0xfa, 0x0e, 0xdd, + 0x0e, 0x42, 0x9e, 0x64, 0x5a, 0xbc, 0x49, 0xb6, 0xde, 0x2e, 0x89, 0xa1, 0x7b, 0x6c, 0x23, 0x13, + 0x0b, 0xe7, 0xd4, 0x46, 0xb7, 0x61, 0x9a, 0xed, 0x84, 0x7a, 0xc7, 0xf3, 0xea, 0x81, 0xe7, 0x36, + 0x0f, 0xca, 0x53, 0x8c, 0xe0, 0x67, 0xa4, 0x9a, 0xae, 0x66, 0x82, 0xe9, 0x8d, 0x37, 0xf9, 0x87, + 0xd3, 0xb5, 0xd1, 0x26, 0x53, 0xdb, 0x74, 0x42, 0x37, 0x3e, 0xa0, 0xeb, 0x97, 0x3c, 0x88, 0xcb, + 0xd3, 0x3d, 0xef, 0x8f, 0x3a, 0xaa, 0xd2, 0xed, 0xe8, 0x85, 0x38, 0x4d, 0x90, 0x6e, 0xed, 0x28, + 0x6e, 0xb9, 0x7e, 0x79, 0x86, 0x71, 0x0c, 0xb5, 0x33, 0x1a, 0xb4, 0x10, 0x73, 0x18, 0x53, 0xd9, + 0xd0, 0x1f, 0xb7, 0x29, 0x07, 0x9d, 0x65, 0x88, 0x89, 0xca, 0x46, 0x02, 0x70, 0x82, 0x43, 0x8f, + 0xe5, 0x38, 0x3e, 0x28, 0x23, 0x86, 0xaa, 0xb6, 0xcb, 0xc6, 0xc6, 0x97, 0x31, 0x2d, 0x47, 0xb7, + 0x60, 0x94, 0xf8, 0xfb, 0xab, 0x61, 0xb0, 0x57, 0x3e, 0x97, 0xbf, 0x67, 0x57, 0x38, 0x0a, 0x67, + 0xe8, 0xc9, 0x05, 0x40, 0x14, 0x63, 0x49, 0x02, 0x3d, 0x80, 0x72, 0xc6, 0x8c, 0xf0, 0x09, 0x38, + 0xcf, 0x26, 0xe0, 0x0b, 0xa2, 0x6e, 0x79, 0x23, 0x07, 0xef, 0xb8, 0x07, 0x0c, 0xe7, 0x52, 0x47, + 0xdf, 0x01, 0x93, 0x7c, 0x43, 0x71, 0x7d, 0x6f, 0x54, 0xbe, 0xc0, 0xbe, 0xe6, 0x72, 0xfe, 0xe6, + 0xe4, 0x88, 0x4b, 0x17, 0x44, 0x87, 0x26, 0xf5, 0xd2, 0x08, 0x9b, 0xd4, 0xec, 0x4d, 0x98, 0x52, + 0x7c, 0x8b, 0x2d, 0x1d, 0x54, 0x81, 0x61, 0xca, 0x90, 0xe5, 0x8d, 0xbd, 0x44, 0x67, 0x8a, 0xe9, + 0xe9, 0x30, 0x2f, 0x67, 0x33, 0xe5, 0x7e, 0x48, 0x96, 0x0e, 0x62, 0xc2, 0x6f, 0x5d, 0x45, 0x6d, + 0xa6, 0x24, 0x00, 0x27, 0x38, 0xf6, 0xff, 0xe3, 0x72, 0x4f, 0xc2, 0x1c, 0x07, 0x38, 0x0e, 0x5e, + 0x82, 0xb1, 0x9d, 0x20, 0x8a, 0x29, 0x36, 0x6b, 0x63, 0x38, 0x91, 0x74, 0x6e, 0x88, 0x72, 0xac, + 0x30, 0xd0, 0x9b, 0x30, 0xd9, 0xd4, 0x1b, 0x10, 0x67, 0x99, 0x1a, 0x02, 0xa3, 0x75, 0x6c, 0xe2, + 0xa2, 0x6b, 0x30, 0xc6, 0x5e, 0x6b, 0x9a, 0x81, 0x27, 0xee, 0x77, 0xf2, 0x40, 0x1e, 0xab, 0x8b, + 0xf2, 0x63, 0xed, 0x37, 0x56, 0xd8, 0xf4, 0xce, 0x4d, 0xbb, 0x50, 0xab, 0x8b, 0x53, 0x44, 0xdd, + 0xb9, 0x6f, 0xb0, 0x52, 0x2c, 0xa0, 0xf6, 0xdf, 0x28, 0x68, 0xa3, 0x4c, 0x6f, 0x2c, 0x04, 0xd5, + 0x61, 0xf4, 0xbe, 0xe3, 0xc6, 0xae, 0xbf, 0x2d, 0xc4, 0x85, 0xe7, 0x7b, 0x1e, 0x29, 0xac, 0xd2, + 0x3d, 0x5e, 0x81, 0x1f, 0x7a, 0xe2, 0x0f, 0x96, 0x64, 0x28, 0xc5, 0xb0, 0xe3, 0xfb, 0x94, 0x62, + 0x61, 0x50, 0x8a, 0x98, 0x57, 0xe0, 0x14, 0xc5, 0x1f, 0x2c, 0xc9, 0xa0, 0xf7, 0x00, 0xe4, 0xb2, + 0x24, 0x2d, 0xf1, 0x4a, 0xf2, 0x52, 0x7f, 0xa2, 0x1b, 0xaa, 0xce, 0xd2, 0x14, 0x3d, 0x52, 0x93, + 0xff, 0x58, 0xa3, 0x67, 0xc7, 0x4c, 0xac, 0xea, 
0xee, 0x0c, 0xfa, 0x76, 0xca, 0x09, 0x9c, 0x30, + 0x26, 0xad, 0xc5, 0x58, 0x0c, 0xce, 0x0b, 0x83, 0x49, 0xc5, 0x1b, 0xee, 0x1e, 0xd1, 0xb9, 0x86, + 0x20, 0x82, 0x13, 0x7a, 0xf6, 0xcf, 0x17, 0xa1, 0x9c, 0xd7, 0x5d, 0xba, 0xe8, 0xc8, 0x03, 0x37, + 0x5e, 0xa6, 0xd2, 0x90, 0x65, 0x2e, 0xba, 0x15, 0x51, 0x8e, 0x15, 0x06, 0x9d, 0xfd, 0xc8, 0xdd, + 0x96, 0x97, 0x9a, 0xe1, 0x64, 0xf6, 0x1b, 0xac, 0x14, 0x0b, 0x28, 0xc5, 0x0b, 0x89, 0x13, 0x89, + 0x67, 0x38, 0x6d, 0x95, 0x60, 0x56, 0x8a, 0x05, 0x54, 0xd7, 0x47, 0x0c, 0xf5, 0xd1, 0x47, 0x18, + 0x43, 0x34, 0x7c, 0xba, 0x43, 0x84, 0xbe, 0x02, 0xb0, 0xe5, 0xfa, 0x6e, 0xb4, 0xc3, 0xa8, 0x8f, + 0x9c, 0x98, 0xba, 0x92, 0xa5, 0x56, 0x15, 0x15, 0xac, 0x51, 0x44, 0xaf, 0xc3, 0xb8, 0xda, 0x80, + 0xb5, 0x6a, 0x79, 0xd4, 0x7c, 0xe3, 0x49, 0xb8, 0x51, 0x15, 0xeb, 0x78, 0xf6, 0xfb, 0xe9, 0xf5, + 0x22, 0x76, 0x80, 0x36, 0xbe, 0xd6, 0xa0, 0xe3, 0x5b, 0xe8, 0x3d, 0xbe, 0xf6, 0xaf, 0x16, 0x61, + 0xda, 0x68, 0xac, 0x13, 0x0d, 0xc0, 0xb3, 0xae, 0xd3, 0x73, 0xce, 0x89, 0x89, 0xd8, 0x7f, 0x76, + 0xff, 0xad, 0xa2, 0x9f, 0x85, 0x74, 0x07, 0xf0, 0xfa, 0xe8, 0x2b, 0x50, 0xf2, 0x9c, 0x88, 0xe9, + 0x36, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0xee, 0x11, 0x4e, 0x14, 0x6b, 0x47, 0x0d, 0xa7, 0x9d, + 0x90, 0xa4, 0x07, 0x32, 0x95, 0x7d, 0xe4, 0x3b, 0xaf, 0xea, 0x04, 0x15, 0x90, 0x0e, 0x30, 0x87, + 0xa1, 0x6b, 0x30, 0x11, 0x12, 0xb6, 0x2a, 0x96, 0xa9, 0x28, 0xc7, 0x96, 0xd9, 0x70, 0x22, 0xf3, + 0x61, 0x0d, 0x86, 0x0d, 0xcc, 0x44, 0x94, 0x1f, 0xe9, 0x21, 0xca, 0x3f, 0x0f, 0xa3, 0xec, 0x87, + 0x5a, 0x01, 0x6a, 0x36, 0x6a, 0xbc, 0x18, 0x4b, 0x78, 0x7a, 0xc1, 0x8c, 0x0d, 0xb8, 0x60, 0x5e, + 0x80, 0xa9, 0xaa, 0x43, 0xf6, 0x02, 0x7f, 0xc5, 0x6f, 0xb5, 0x03, 0xd7, 0x8f, 0x51, 0x19, 0x86, + 0xd8, 0xe9, 0xc0, 0xf7, 0xf6, 0x10, 0xa5, 0x80, 0x87, 0xa8, 0x60, 0x6e, 0xff, 0x56, 0x01, 0x26, + 0xab, 0xc4, 0x23, 0x31, 0xe1, 0x57, 0x99, 0x08, 0xad, 0x02, 0xda, 0x0e, 0x9d, 0x26, 0xa9, 0x93, + 0xd0, 0x0d, 0x5a, 0xba, 0xae, 0xb3, 0xc8, 0xde, 0x13, 0xd0, 0xf5, 0x2e, 0x28, 0xce, 0xa8, 0x81, + 0xde, 0x85, 0xc9, 0x76, 0x48, 0x0c, 0x15, 0x9d, 0x95, 0x27, 0x8d, 0xd4, 0x75, 0x44, 0x2e, 0x08, + 0x1b, 0x45, 0xd8, 0x24, 0x85, 0xbe, 0x0d, 0x66, 0x82, 0xb0, 0xbd, 0xe3, 0xf8, 0x55, 0xd2, 0x26, + 0x7e, 0x8b, 0x4a, 0xfa, 0x42, 0x05, 0x71, 0xfe, 0xe8, 0xb0, 0x32, 0x73, 0x3b, 0x05, 0xc3, 0x5d, + 0xd8, 0xe8, 0x5d, 0x98, 0x6d, 0x87, 0x41, 0xdb, 0xd9, 0x66, 0x0b, 0x45, 0x08, 0x34, 0x9c, 0xfb, + 0xbc, 0x74, 0x74, 0x58, 0x99, 0xad, 0xa7, 0x81, 0xc7, 0x87, 0x95, 0x73, 0x6c, 0xa0, 0x68, 0x49, + 0x02, 0xc4, 0xdd, 0x64, 0xec, 0x6d, 0xb8, 0x50, 0x0d, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, 0x58, + 0xaf, 0x69, 0xba, 0x83, 0x75, 0x79, 0x77, 0xe5, 0x6f, 0xd1, 0x99, 0xe7, 0x94, 0x56, 0x93, 0xcb, + 0x2f, 0xab, 0xae, 0x47, 0x72, 0x74, 0x14, 0x7f, 0xbb, 0x60, 0xb4, 0x94, 0xe0, 0xab, 0x67, 0x05, + 0x2b, 0xf7, 0x59, 0xe1, 0x1d, 0x18, 0xdb, 0x72, 0x89, 0xd7, 0xc2, 0x64, 0x4b, 0xcc, 0xcc, 0x73, + 0xf9, 0xcf, 0x6b, 0xab, 0x14, 0x53, 0xea, 0xa4, 0xf8, 0xcd, 0x77, 0x55, 0x54, 0xc6, 0x8a, 0x0c, + 0xda, 0x85, 0x19, 0x79, 0xb5, 0x92, 0x50, 0xb1, 0x89, 0x9f, 0xef, 0x75, 0x5f, 0x33, 0x89, 0xb3, + 0x09, 0xc4, 0x29, 0x32, 0xb8, 0x8b, 0x30, 0xbd, 0xea, 0xee, 0xd1, 0xe3, 0x6a, 0x88, 0x2d, 0x69, + 0x76, 0xd5, 0x65, 0xb7, 0x76, 0x56, 0x6a, 0xff, 0xa8, 0x05, 0x8f, 0x77, 0x8d, 0x8c, 0xd0, 0x5e, + 0x9c, 0xf2, 0x2c, 0xa4, 0xb5, 0x09, 0x85, 0xfe, 0xda, 0x04, 0xfb, 0x1f, 0x59, 0x70, 0x7e, 0x65, + 0xaf, 0x1d, 0x1f, 0x54, 0x5d, 0xf3, 0xe9, 0xe3, 0x0d, 0x18, 0xd9, 0x23, 0x2d, 0xb7, 0xb3, 0x27, + 0x66, 0xae, 0x22, 0x59, 0xfa, 0x1a, 0x2b, 0x3d, 0x3e, 0xac, 0x4c, 0x36, 
0xe2, 0x20, 0x74, 0xb6, + 0x09, 0x2f, 0xc0, 0x02, 0x9d, 0x1d, 0x8c, 0xee, 0x87, 0xe4, 0x96, 0xbb, 0xe7, 0xca, 0xe7, 0xd2, + 0x9e, 0x1a, 0xb5, 0x79, 0x39, 0xa0, 0xf3, 0xef, 0x74, 0x1c, 0x3f, 0x76, 0xe3, 0x03, 0xf1, 0xaa, + 0x23, 0x89, 0xe0, 0x84, 0x9e, 0xfd, 0x0d, 0x0b, 0xa6, 0x25, 0x2f, 0x59, 0x6c, 0xb5, 0x42, 0x12, + 0x45, 0x68, 0x0e, 0x0a, 0x6e, 0x5b, 0xf4, 0x12, 0x44, 0x2f, 0x0b, 0xb5, 0x3a, 0x2e, 0xb8, 0x6d, + 0x54, 0x87, 0x12, 0x7f, 0x75, 0x4d, 0x16, 0xd7, 0x40, 0x6f, 0xb7, 0xac, 0x07, 0x1b, 0xb2, 0x26, + 0x4e, 0x88, 0x48, 0xa9, 0x98, 0x9d, 0x43, 0x45, 0xf3, 0x49, 0xe8, 0x86, 0x28, 0xc7, 0x0a, 0x03, + 0x5d, 0x81, 0x31, 0x3f, 0x68, 0xf1, 0x47, 0x70, 0xbe, 0xa7, 0xd9, 0x92, 0x5d, 0x17, 0x65, 0x58, + 0x41, 0xed, 0xef, 0xb7, 0x60, 0x42, 0x7e, 0xd9, 0x80, 0x02, 0x3a, 0xdd, 0x5a, 0x89, 0x70, 0x9e, + 0x6c, 0x2d, 0x2a, 0x60, 0x33, 0x88, 0x21, 0x57, 0x17, 0x4f, 0x22, 0x57, 0xdb, 0x3f, 0x52, 0x80, + 0x29, 0xd9, 0x9d, 0x46, 0x67, 0x33, 0x22, 0x31, 0xda, 0x80, 0x92, 0xc3, 0x87, 0x9c, 0xc8, 0x15, + 0xfb, 0x4c, 0xf6, 0x85, 0xce, 0x98, 0x9f, 0x44, 0xd4, 0x59, 0x94, 0xb5, 0x71, 0x42, 0x08, 0x79, + 0x30, 0xeb, 0x07, 0x31, 0x3b, 0xf6, 0x14, 0xbc, 0xd7, 0xb3, 0x43, 0x9a, 0xfa, 0x45, 0x41, 0x7d, + 0x76, 0x3d, 0x4d, 0x05, 0x77, 0x13, 0x46, 0x2b, 0x52, 0x89, 0x54, 0xcc, 0xbf, 0xc2, 0xe9, 0xb3, + 0x90, 0xad, 0x43, 0xb2, 0x7f, 0xd9, 0x82, 0x92, 0x44, 0x3b, 0x8b, 0x17, 0xa6, 0x35, 0x18, 0x8d, + 0xd8, 0x24, 0xc8, 0xa1, 0xb1, 0x7b, 0x75, 0x9c, 0xcf, 0x57, 0x72, 0x9a, 0xf3, 0xff, 0x11, 0x96, + 0x34, 0x98, 0x16, 0x5c, 0x75, 0xff, 0x13, 0xa2, 0x05, 0x57, 0xfd, 0xc9, 0x39, 0x61, 0xfe, 0x1b, + 0xeb, 0xb3, 0xa6, 0x2a, 0xa0, 0x42, 0x67, 0x3b, 0x24, 0x5b, 0xee, 0x83, 0xb4, 0xd0, 0x59, 0x67, + 0xa5, 0x58, 0x40, 0xd1, 0x7b, 0x30, 0xd1, 0x94, 0xca, 0xe3, 0x84, 0x0d, 0x3c, 0xdb, 0x53, 0x15, + 0xaf, 0x5e, 0x6d, 0xb8, 0x81, 0xdc, 0xb2, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0x77, 0xff, 0x62, 0xbf, + 0x77, 0xff, 0x84, 0x6e, 0xee, 0xcb, 0xb5, 0xfd, 0x63, 0x16, 0x8c, 0x70, 0x15, 0xe4, 0x60, 0x3a, + 0x5b, 0xed, 0x15, 0x2a, 0x19, 0xbb, 0xbb, 0xb4, 0x50, 0x3c, 0x4a, 0xa1, 0x35, 0x28, 0xb1, 0x1f, + 0x4c, 0x15, 0x53, 0xcc, 0xb7, 0x0c, 0xe4, 0xad, 0xea, 0x1d, 0xbc, 0x2b, 0xab, 0xe1, 0x84, 0x82, + 0xfd, 0x83, 0x45, 0xca, 0xaa, 0x12, 0x54, 0xe3, 0x04, 0xb7, 0x1e, 0xdd, 0x09, 0x5e, 0x78, 0x54, + 0x27, 0xf8, 0x36, 0x4c, 0x37, 0xb5, 0x27, 0xaf, 0x64, 0x26, 0xaf, 0xf4, 0x5c, 0x24, 0xda, 0xeb, + 0x18, 0x57, 0xc3, 0x2d, 0x9b, 0x44, 0x70, 0x9a, 0x2a, 0xfa, 0x76, 0x98, 0xe0, 0xf3, 0x2c, 0x5a, + 0x19, 0x62, 0xad, 0x7c, 0x26, 0x7f, 0xbd, 0xe8, 0x4d, 0xb0, 0x95, 0xd8, 0xd0, 0xaa, 0x63, 0x83, + 0x98, 0xfd, 0xf3, 0x63, 0x30, 0xbc, 0xb2, 0x4f, 0xfc, 0xf8, 0x0c, 0x18, 0x52, 0x13, 0xa6, 0x5c, + 0x7f, 0x3f, 0xf0, 0xf6, 0x49, 0x8b, 0xc3, 0x4f, 0x72, 0xb8, 0x3e, 0x26, 0x48, 0x4f, 0xd5, 0x0c, + 0x12, 0x38, 0x45, 0xf2, 0x51, 0xdc, 0xda, 0xaf, 0xc3, 0x08, 0x9f, 0x7b, 0x71, 0x65, 0xcf, 0x54, + 0xb0, 0xb3, 0x41, 0x14, 0xbb, 0x20, 0xd1, 0x28, 0x70, 0x8d, 0xbe, 0xa8, 0x8e, 0xde, 0x87, 0xa9, + 0x2d, 0x37, 0x8c, 0x62, 0x7a, 0xdd, 0x8e, 0x62, 0x67, 0xaf, 0xfd, 0x10, 0xb7, 0x74, 0x35, 0x0e, + 0xab, 0x06, 0x25, 0x9c, 0xa2, 0x8c, 0xb6, 0x61, 0x92, 0x5e, 0x1c, 0x93, 0xa6, 0x46, 0x4f, 0xdc, + 0x94, 0x52, 0xc3, 0xdd, 0xd2, 0x09, 0x61, 0x93, 0x2e, 0x65, 0x26, 0x4d, 0x76, 0xd1, 0x1c, 0x63, + 0x12, 0x85, 0x62, 0x26, 0xfc, 0x86, 0xc9, 0x61, 0x94, 0x27, 0x31, 0x53, 0x91, 0x92, 0xc9, 0x93, + 0x34, 0x83, 0x90, 0xaf, 0x42, 0x89, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0x63, 0xc3, 0xc2, 0x60, 0x7d, + 0x5d, 0x73, 0x9b, 0x61, 0x60, 0xea, 0x47, 0x56, 0x24, 0x25, 0x9c, 0x10, 0x45, 0xcb, 0x30, 0x12, + 
0x91, 0xd0, 0x25, 0x91, 0x78, 0x76, 0xe8, 0x31, 0x8d, 0x0c, 0x8d, 0x9b, 0x90, 0xf2, 0xdf, 0x58, + 0x54, 0xa5, 0xcb, 0xcb, 0x61, 0xb7, 0x21, 0xf6, 0xd2, 0xa0, 0x2d, 0xaf, 0x45, 0x56, 0x8a, 0x05, + 0x14, 0xbd, 0x0d, 0xa3, 0x21, 0xf1, 0x98, 0x02, 0x6e, 0x72, 0xf0, 0x45, 0xce, 0xf5, 0x79, 0xbc, + 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x21, 0xa1, 0x32, 0x84, 0xeb, 0x6f, 0x2b, 0x03, 0x0a, 0xf1, + 0x7e, 0xf0, 0x84, 0x68, 0xff, 0x1c, 0x4e, 0x30, 0xfc, 0x38, 0x0c, 0x3c, 0x8f, 0x84, 0x38, 0xa3, + 0x1a, 0xba, 0x0e, 0xb3, 0xaa, 0xb4, 0xe6, 0x47, 0xb1, 0xe3, 0x37, 0x09, 0x7b, 0x3a, 0x28, 0x25, + 0x52, 0x11, 0x4e, 0x23, 0xe0, 0xee, 0x3a, 0xf6, 0xd7, 0xa9, 0x38, 0x43, 0x47, 0xeb, 0x0c, 0x64, + 0x81, 0xb7, 0x4c, 0x59, 0xe0, 0x62, 0xee, 0xcc, 0xe5, 0xc8, 0x01, 0x47, 0x16, 0x8c, 0x6b, 0x33, + 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed, 0xc0, 0x0c, 0x5d, 0xe9, 0xb7, 0x37, 0x99, 0x37, 0x45, + 0x8b, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d, 0xcc, 0xb2, 0x68, 0x60, 0xe6, 0x56, 0x8a, 0x20, 0xee, 0x6a, + 0x02, 0xbd, 0x21, 0xb5, 0x51, 0x45, 0xc3, 0x30, 0x8a, 0x6b, 0x9a, 0x8e, 0x0f, 0x2b, 0x33, 0xda, + 0x87, 0xe8, 0xda, 0x27, 0xfb, 0xab, 0xf2, 0x1b, 0x39, 0xb3, 0x59, 0x80, 0x52, 0x53, 0x2d, 0x16, + 0xcb, 0xb4, 0xa5, 0x55, 0xcb, 0x01, 0x27, 0x38, 0x74, 0x8f, 0xd2, 0x2b, 0x48, 0xda, 0x96, 0x8f, + 0x5e, 0x50, 0x30, 0x83, 0xd8, 0xaf, 0x02, 0xac, 0x3c, 0x20, 0x4d, 0xbe, 0xd4, 0xf5, 0x47, 0x5d, + 0x2b, 0xff, 0x51, 0xd7, 0xfe, 0x8f, 0x16, 0x4c, 0xad, 0x2e, 0x1b, 0xd7, 0xc4, 0x79, 0x00, 0x7e, + 0x37, 0xba, 0x77, 0x6f, 0x5d, 0xbe, 0x57, 0x70, 0x95, 0xb3, 0x2a, 0xc5, 0x1a, 0x06, 0xba, 0x08, + 0x45, 0xaf, 0xe3, 0x8b, 0x2b, 0xcb, 0xe8, 0xd1, 0x61, 0xa5, 0x78, 0xab, 0xe3, 0x63, 0x5a, 0xa6, + 0x99, 0xcf, 0x15, 0x07, 0x36, 0x9f, 0xeb, 0xeb, 0x25, 0x81, 0x2a, 0x30, 0x7c, 0xff, 0xbe, 0xdb, + 0x8a, 0xca, 0xc3, 0xc9, 0x5b, 0xca, 0xbd, 0x7b, 0xb5, 0x6a, 0x84, 0x79, 0xb9, 0xfd, 0x97, 0x8a, + 0x30, 0xb3, 0xea, 0x91, 0x07, 0x0f, 0x65, 0x85, 0x3b, 0xa8, 0xc9, 0xdf, 0x9d, 0x6e, 0x29, 0xf1, + 0xb4, 0x8d, 0x1c, 0xfb, 0x0f, 0xc5, 0x7b, 0x30, 0xca, 0x6d, 0x03, 0xf8, 0x60, 0x8c, 0x5f, 0x7d, + 0x25, 0xab, 0x0b, 0xe9, 0xb1, 0x98, 0x17, 0xea, 0x38, 0x6e, 0x28, 0xa5, 0x8e, 0x56, 0x51, 0x8a, + 0x25, 0xc9, 0xb9, 0xcf, 0xc3, 0x84, 0x8e, 0x79, 0x22, 0x8b, 0xa9, 0xbf, 0x6c, 0xc1, 0xb9, 0x55, + 0x2f, 0x68, 0xee, 0xa6, 0xec, 0x2f, 0x5f, 0x87, 0x71, 0xca, 0x34, 0x22, 0xc3, 0xf2, 0xdc, 0xf0, + 0x45, 0x10, 0x20, 0xac, 0xe3, 0x69, 0xd5, 0xee, 0xdc, 0xa9, 0x55, 0xb3, 0x5c, 0x18, 0x04, 0x08, + 0xeb, 0x78, 0xf6, 0x6f, 0x58, 0xf0, 0xd4, 0xf5, 0xe5, 0x95, 0xc4, 0x04, 0xb9, 0xcb, 0x8b, 0x82, + 0x5e, 0x39, 0x5a, 0x5a, 0x57, 0x92, 0x2b, 0x47, 0x95, 0xf5, 0x42, 0x40, 0x3f, 0x29, 0x1e, 0x42, + 0x3f, 0x69, 0xc1, 0xb9, 0xeb, 0x6e, 0x4c, 0xcf, 0x80, 0xb4, 0x3d, 0x3f, 0x3d, 0x04, 0x22, 0x37, + 0x0e, 0xc2, 0x83, 0xb4, 0x3d, 0x3f, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0xdd, 0x88, 0xf6, + 0xb4, 0x60, 0xea, 0x3d, 0xb0, 0x28, 0xc7, 0x0a, 0x83, 0x7e, 0x58, 0xcb, 0x0d, 0x99, 0xdc, 0x7a, + 0x20, 0xb6, 0xb3, 0xfa, 0xb0, 0xaa, 0x04, 0xe0, 0x04, 0xc7, 0xfe, 0x51, 0x0b, 0x2e, 0x5c, 0xf7, + 0x3a, 0x51, 0x4c, 0xc2, 0xad, 0xc8, 0xe8, 0xec, 0xab, 0x50, 0x22, 0xf2, 0x6e, 0x28, 0xfa, 0xaa, + 0xa4, 0x19, 0x75, 0x69, 0xe4, 0xce, 0x04, 0x0a, 0x6f, 0x00, 0x63, 0xe6, 0x93, 0x19, 0xe1, 0xfe, + 0x4c, 0x01, 0x26, 0x6f, 0x6c, 0x6c, 0xd4, 0xaf, 0x93, 0x58, 0xb0, 0xcc, 0xfe, 0x7a, 0x4d, 0xac, + 0xa9, 0x67, 0x7a, 0x49, 0xe0, 0x9d, 0xd8, 0xf5, 0xe6, 0xb9, 0xf7, 0xda, 0x7c, 0xcd, 0x8f, 0x6f, + 0x87, 0x8d, 0x38, 0x74, 0xfd, 0xed, 0x4c, 0x85, 0x8e, 0x64, 0xec, 0xc5, 0x3c, 0xc6, 0x8e, 0x5e, + 0x85, 0x11, 0xe6, 0x3e, 
0x27, 0x65, 0xe1, 0x27, 0x94, 0x00, 0xcb, 0x4a, 0x8f, 0x0f, 0x2b, 0xa5, + 0x3b, 0xb8, 0xc6, 0xff, 0x60, 0x81, 0x8a, 0xee, 0xc0, 0xf8, 0x4e, 0x1c, 0xb7, 0x6f, 0x10, 0xa7, + 0x45, 0x42, 0xc9, 0x1d, 0x2e, 0x65, 0x71, 0x07, 0x3a, 0x08, 0x1c, 0x2d, 0xd9, 0x50, 0x49, 0x59, + 0x84, 0x75, 0x3a, 0x76, 0x03, 0x20, 0x81, 0x9d, 0xd2, 0x65, 0xd6, 0xfe, 0x7d, 0x0b, 0x46, 0xb9, + 0x27, 0x43, 0x88, 0xbe, 0x00, 0x43, 0xe4, 0x01, 0x69, 0x0a, 0x31, 0x25, 0xb3, 0xc3, 0xc9, 0x29, + 0xc7, 0x55, 0xb3, 0xf4, 0x3f, 0x66, 0xb5, 0xd0, 0x0d, 0x18, 0xa5, 0xbd, 0xbd, 0xae, 0xdc, 0x3a, + 0x9e, 0xce, 0xfb, 0x62, 0x35, 0xed, 0xfc, 0x60, 0x14, 0x45, 0x58, 0x56, 0x67, 0x6a, 0xc6, 0x66, + 0xbb, 0x41, 0x19, 0x58, 0xdc, 0x4b, 0x09, 0xb0, 0xb1, 0x5c, 0xe7, 0x48, 0x82, 0x1a, 0x57, 0x33, + 0xca, 0x42, 0x9c, 0x10, 0xb1, 0x37, 0xa0, 0x44, 0x27, 0x75, 0xd1, 0x73, 0x9d, 0xde, 0x1a, 0xce, + 0x17, 0xa1, 0x24, 0xb5, 0x8d, 0x91, 0xf0, 0xb5, 0x60, 0x54, 0xa5, 0x32, 0x32, 0xc2, 0x09, 0xdc, + 0xde, 0x82, 0xf3, 0xec, 0xe9, 0xde, 0x89, 0x77, 0x8c, 0x3d, 0xd6, 0x7f, 0x31, 0xbf, 0x24, 0xa4, + 0x7e, 0x3e, 0x33, 0x65, 0xcd, 0x38, 0x7c, 0x42, 0x52, 0x4c, 0x6e, 0x00, 0xf6, 0x1f, 0x0e, 0xc1, + 0x13, 0xb5, 0x46, 0xbe, 0x93, 0xcb, 0x35, 0x98, 0xe0, 0x32, 0x01, 0x5d, 0xda, 0x8e, 0x27, 0xda, + 0x55, 0x0f, 0x5b, 0x1b, 0x1a, 0x0c, 0x1b, 0x98, 0xe8, 0x29, 0x28, 0xba, 0x1f, 0xf8, 0x69, 0x4b, + 0xd0, 0xda, 0x3b, 0xeb, 0x98, 0x96, 0x53, 0x30, 0x15, 0x2f, 0x38, 0x2b, 0x55, 0x60, 0x25, 0x62, + 0xbc, 0x05, 0x53, 0x6e, 0xd4, 0x8c, 0xdc, 0x9a, 0x4f, 0xf9, 0x4c, 0xe2, 0x20, 0x95, 0xdc, 0x48, + 0x69, 0xa7, 0x15, 0x14, 0xa7, 0xb0, 0x35, 0xbe, 0x3e, 0x3c, 0xb0, 0x88, 0xd2, 0xd7, 0xf9, 0x80, + 0x4a, 0x5f, 0x6d, 0xf6, 0x75, 0x11, 0xb3, 0x4a, 0x13, 0xd2, 0x17, 0xff, 0xe0, 0x08, 0x4b, 0x18, + 0x15, 0xf7, 0x9b, 0x3b, 0x4e, 0x7b, 0xb1, 0x13, 0xef, 0x54, 0xdd, 0xa8, 0x19, 0xec, 0x93, 0xf0, + 0x80, 0xdd, 0xd4, 0xc6, 0x12, 0x71, 0x5f, 0x01, 0x96, 0x6f, 0x2c, 0xd6, 0x29, 0x26, 0xee, 0xae, + 0x63, 0xaa, 0xac, 0xe0, 0x34, 0x5c, 0x55, 0x16, 0x61, 0x5a, 0x36, 0xd3, 0x20, 0x11, 0x3b, 0x23, + 0xc6, 0x59, 0xc7, 0x94, 0xeb, 0xa2, 0x28, 0x56, 0xdd, 0x4a, 0xe3, 0xa3, 0x37, 0x60, 0xd2, 0xf5, + 0xdd, 0xd8, 0x75, 0xe2, 0x20, 0x64, 0x27, 0x2c, 0xbf, 0x94, 0xb1, 0x17, 0xb8, 0x9a, 0x0e, 0xc0, + 0x26, 0x9e, 0xfd, 0x07, 0x43, 0x30, 0xcb, 0xa6, 0xed, 0x9b, 0x2b, 0xec, 0x13, 0xb3, 0xc2, 0xee, + 0x74, 0xaf, 0xb0, 0xd3, 0x10, 0x77, 0x3f, 0xce, 0x65, 0xf6, 0x3e, 0x94, 0x94, 0x31, 0xaf, 0xb4, + 0x47, 0xb7, 0x72, 0xec, 0xd1, 0xfb, 0x4b, 0x1f, 0xf2, 0xcd, 0xb0, 0x98, 0xf9, 0x66, 0xf8, 0x77, + 0x2c, 0x48, 0x6c, 0x1a, 0xd1, 0x0d, 0x28, 0xb5, 0x03, 0x66, 0x37, 0x10, 0x4a, 0x63, 0x9c, 0x27, + 0x32, 0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0xf1, 0xab, 0xcb, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, + 0x1d, 0x92, 0x46, 0xcc, 0xbc, 0xf2, 0xfa, 0xd2, 0xe1, 0x6b, 0x84, 0xe3, 0x63, 0x59, 0xd1, 0xfe, + 0x59, 0x0b, 0x80, 0x3f, 0xcb, 0x39, 0xfe, 0x36, 0x39, 0x03, 0x55, 0x63, 0x15, 0x86, 0xa2, 0x36, + 0x69, 0xf6, 0xb2, 0xe8, 0x48, 0xfa, 0xd3, 0x68, 0x93, 0x66, 0x32, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, + 0xed, 0xef, 0x01, 0x98, 0x4a, 0xd0, 0x6a, 0x31, 0xd9, 0x43, 0x2f, 0x1b, 0x3e, 0x4f, 0x17, 0x53, + 0x3e, 0x4f, 0x25, 0x86, 0xad, 0x69, 0xb5, 0xde, 0x87, 0xe2, 0x9e, 0xf3, 0x40, 0xa8, 0x2d, 0x5e, + 0xec, 0xdd, 0x0d, 0x4a, 0x7f, 0x7e, 0xcd, 0x79, 0xc0, 0xef, 0x4c, 0x2f, 0xca, 0x05, 0xb2, 0xe6, + 0x3c, 0x38, 0xe6, 0x76, 0x1b, 0x8c, 0x49, 0xdd, 0x72, 0xa3, 0xf8, 0x6b, 0xff, 0x25, 0xf9, 0xcf, + 0x96, 0x1d, 0x6d, 0x84, 0xb5, 0xe5, 0xfa, 0xe2, 0x91, 0x6a, 0xa0, 0xb6, 0x5c, 0x3f, 0xdd, 0x96, + 0xeb, 0x0f, 0xd0, 0x96, 0xeb, 0xa3, 0x0f, 0x61, 
0x54, 0x3c, 0x08, 0x33, 0x63, 0x6d, 0x53, 0x25, + 0x92, 0xd7, 0x9e, 0x78, 0x4f, 0xe6, 0x6d, 0x2e, 0xc8, 0x3b, 0xa1, 0x28, 0xed, 0xdb, 0xae, 0x6c, + 0x10, 0xfd, 0x2d, 0x0b, 0xa6, 0xc4, 0x6f, 0x4c, 0x3e, 0xe8, 0x90, 0x28, 0x16, 0xb2, 0xe7, 0xe7, + 0x06, 0xef, 0x83, 0xa8, 0xc8, 0xbb, 0xf2, 0x39, 0xc9, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0xa5, 0x7a, + 0x81, 0xfe, 0x89, 0x05, 0xe7, 0xf7, 0x9c, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xd8, 0x0d, 0x84, + 0xf1, 0xf9, 0x17, 0x06, 0x9b, 0xfe, 0xae, 0xea, 0xbc, 0x93, 0xd2, 0x4e, 0xf5, 0x7c, 0x16, 0x4a, + 0xdf, 0xae, 0x66, 0xf6, 0x6b, 0x6e, 0x0b, 0xc6, 0xe4, 0x7a, 0xcb, 0xb8, 0x79, 0x57, 0x75, 0xc1, + 0xfa, 0xc4, 0xef, 0xf1, 0xda, 0x4d, 0x9d, 0xb5, 0x23, 0xd6, 0xda, 0x23, 0x6d, 0xe7, 0x7d, 0x98, + 0xd0, 0xd7, 0xd8, 0x23, 0x6d, 0xeb, 0x03, 0x38, 0x97, 0xb1, 0x96, 0x1e, 0x69, 0x93, 0xf7, 0xe1, + 0x62, 0xee, 0xfa, 0x78, 0x94, 0x0d, 0xdb, 0x3f, 0x63, 0xe9, 0x7c, 0xf0, 0x0c, 0xf4, 0xbd, 0xcb, + 0xa6, 0xbe, 0xf7, 0x52, 0xef, 0x9d, 0x93, 0xa3, 0xf4, 0x7d, 0x4f, 0xef, 0x34, 0xe5, 0xea, 0xe8, + 0x6d, 0x18, 0xf1, 0x68, 0x89, 0xb4, 0x44, 0xb0, 0xfb, 0xef, 0xc8, 0x44, 0x96, 0x62, 0xe5, 0x11, + 0x16, 0x14, 0xec, 0x5f, 0xb0, 0x60, 0xe8, 0x0c, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x72, 0x2e, 0x69, + 0x11, 0x06, 0x67, 0x1e, 0x3b, 0xf7, 0x57, 0x64, 0xa8, 0x9f, 0x9c, 0x81, 0xf9, 0xbf, 0x05, 0x18, + 0xa7, 0x4d, 0x49, 0x93, 0xb9, 0x37, 0x61, 0xd2, 0x73, 0x36, 0x89, 0x27, 0x1f, 0x0d, 0xd3, 0x0a, + 0x93, 0x5b, 0x3a, 0x10, 0x9b, 0xb8, 0xb4, 0xf2, 0x96, 0xfe, 0x7e, 0x2a, 0xe4, 0x17, 0x55, 0xd9, + 0x78, 0x5c, 0xc5, 0x26, 0x2e, 0xbd, 0xbb, 0xdf, 0x77, 0xe2, 0xe6, 0x8e, 0x50, 0xa6, 0xa8, 0xee, + 0xde, 0xa3, 0x85, 0x98, 0xc3, 0xa8, 0x00, 0x27, 0x57, 0xe7, 0x5d, 0x7a, 0x33, 0x0c, 0x7c, 0x21, + 0x1e, 0x2b, 0x01, 0x0e, 0x9b, 0x60, 0x9c, 0xc6, 0xcf, 0x70, 0x7e, 0x1e, 0x66, 0x06, 0x81, 0x03, + 0x38, 0x3f, 0xa3, 0x3a, 0x9c, 0x77, 0xfd, 0xa6, 0xd7, 0x69, 0x91, 0x3b, 0x3e, 0x97, 0xee, 0x3c, + 0xf7, 0x43, 0xd2, 0x12, 0x02, 0xb4, 0xb2, 0xdd, 0xac, 0x65, 0xe0, 0xe0, 0xcc, 0x9a, 0xf6, 0x5f, + 0x80, 0x73, 0xb7, 0x02, 0xa7, 0xb5, 0xe4, 0x78, 0x8e, 0xdf, 0x24, 0x61, 0xcd, 0xdf, 0xee, 0x6b, + 0x92, 0xa4, 0x1b, 0x10, 0x15, 0xfa, 0x19, 0x10, 0xd9, 0x3b, 0x80, 0xf4, 0x06, 0x84, 0x21, 0x2c, + 0x86, 0x51, 0x97, 0x37, 0x25, 0x96, 0xff, 0x73, 0xd9, 0xd2, 0x75, 0x57, 0xcf, 0x34, 0x13, 0x4f, + 0x5e, 0x80, 0x25, 0x21, 0xfb, 0x1a, 0x64, 0x3a, 0xbf, 0xf5, 0x57, 0xdb, 0xd8, 0xaf, 0xc3, 0x2c, + 0xab, 0x79, 0x32, 0x95, 0x82, 0xfd, 0xd7, 0x2c, 0x98, 0x5e, 0x4f, 0x85, 0x2b, 0x78, 0x96, 0x3d, + 0xec, 0x65, 0xe8, 0xdd, 0x1b, 0xac, 0x14, 0x0b, 0xe8, 0xa9, 0xeb, 0xf7, 0xfe, 0xd4, 0x82, 0x92, + 0x8a, 0x84, 0x72, 0x06, 0x42, 0xed, 0xb2, 0x21, 0xd4, 0x66, 0xea, 0x9d, 0x54, 0x77, 0xf2, 0x64, + 0x5a, 0x74, 0x53, 0x39, 0xde, 0xf7, 0x50, 0x39, 0x25, 0x64, 0xb8, 0x9b, 0xf6, 0x94, 0xe9, 0x9d, + 0x2f, 0x5d, 0xf1, 0x99, 0x4d, 0x90, 0xc2, 0xfd, 0x84, 0xd8, 0x04, 0xa9, 0xfe, 0xe4, 0x70, 0xbf, + 0xba, 0xd6, 0x65, 0x76, 0x2a, 0x7c, 0x2b, 0xb3, 0x9b, 0x67, 0x7b, 0x53, 0xc5, 0xbb, 0xa8, 0x08, + 0x3b, 0x78, 0x51, 0x7a, 0xcc, 0x18, 0x99, 0xf8, 0xc7, 0xa3, 0xd6, 0x24, 0x55, 0xec, 0x1b, 0x30, + 0x9d, 0x1a, 0x30, 0xf4, 0x3a, 0x0c, 0xb7, 0x77, 0x9c, 0x88, 0xa4, 0xec, 0x20, 0x87, 0xeb, 0xb4, + 0xf0, 0xf8, 0xb0, 0x32, 0xa5, 0x2a, 0xb0, 0x12, 0xcc, 0xb1, 0xed, 0xff, 0x69, 0xc1, 0xd0, 0x7a, + 0xd0, 0x3a, 0x8b, 0xc5, 0xf4, 0x96, 0xb1, 0x98, 0x9e, 0xcc, 0x8b, 0xf9, 0x95, 0xbb, 0x8e, 0x56, + 0x53, 0xeb, 0xe8, 0x52, 0x2e, 0x85, 0xde, 0x4b, 0x68, 0x0f, 0xc6, 0x59, 0x24, 0x31, 0x61, 0x97, + 0xf9, 0xaa, 0x71, 0xbf, 0xaa, 0xa4, 0xee, 0x57, 0xd3, 0x1a, 0xaa, 0x76, 
0xcb, 0x7a, 0x1e, 0x46, + 0x85, 0x6d, 0x60, 0xda, 0x43, 0x40, 0xe0, 0x62, 0x09, 0xb7, 0x7f, 0xac, 0x08, 0x46, 0xe4, 0x32, + 0xf4, 0xcb, 0x16, 0xcc, 0x87, 0xdc, 0xe5, 0xb2, 0x55, 0xed, 0x84, 0xae, 0xbf, 0xdd, 0x68, 0xee, + 0x90, 0x56, 0xc7, 0x73, 0xfd, 0xed, 0xda, 0xb6, 0x1f, 0xa8, 0xe2, 0x95, 0x07, 0xa4, 0xd9, 0x61, + 0x6f, 0x2e, 0x7d, 0xc2, 0xa4, 0x29, 0xdb, 0x9b, 0xab, 0x47, 0x87, 0x95, 0x79, 0x7c, 0x22, 0xda, + 0xf8, 0x84, 0x7d, 0x41, 0xbf, 0x61, 0xc1, 0x02, 0x0f, 0xe8, 0x35, 0x78, 0xff, 0x7b, 0xdc, 0x46, + 0xeb, 0x92, 0x54, 0x42, 0x64, 0x83, 0x84, 0x7b, 0x4b, 0x6f, 0x88, 0x01, 0x5d, 0xa8, 0x9f, 0xac, + 0x2d, 0x7c, 0xd2, 0xce, 0xd9, 0xff, 0xa6, 0x08, 0x93, 0x74, 0x14, 0x93, 0x30, 0x23, 0xaf, 0x1b, + 0x4b, 0xe2, 0xe9, 0xd4, 0x92, 0x98, 0x35, 0x90, 0x4f, 0x27, 0xc2, 0x48, 0x04, 0xb3, 0x9e, 0x13, + 0xc5, 0x37, 0x88, 0x13, 0xc6, 0x9b, 0xc4, 0xe1, 0x36, 0x29, 0xc5, 0x13, 0xdb, 0xcf, 0x28, 0xf5, + 0xd7, 0xad, 0x34, 0x31, 0xdc, 0x4d, 0x1f, 0xed, 0x03, 0x62, 0x86, 0x35, 0xa1, 0xe3, 0x47, 0xfc, + 0x5b, 0x5c, 0xf1, 0x1e, 0x73, 0xb2, 0x56, 0xe7, 0x44, 0xab, 0xe8, 0x56, 0x17, 0x35, 0x9c, 0xd1, + 0x82, 0x66, 0x30, 0x35, 0x3c, 0xa8, 0xc1, 0xd4, 0x48, 0x1f, 0x37, 0x9c, 0x3d, 0x98, 0x11, 0xb3, + 0xb2, 0xe5, 0x6e, 0x8b, 0x43, 0xfa, 0xcb, 0x29, 0x83, 0x4a, 0x6b, 0x70, 0xab, 0x98, 0x3e, 0xd6, + 0x94, 0xf6, 0x77, 0xc2, 0x39, 0xda, 0x9c, 0xe9, 0x34, 0x12, 0x21, 0x02, 0xd3, 0xbb, 0x9d, 0x4d, + 0xe2, 0x91, 0x58, 0x96, 0x89, 0x46, 0x33, 0xc5, 0x7e, 0xb3, 0x76, 0x22, 0x5b, 0xde, 0x34, 0x49, + 0xe0, 0x34, 0x4d, 0xfb, 0x27, 0x2c, 0x60, 0xa6, 0xd9, 0x67, 0x70, 0xfc, 0x7d, 0xd1, 0x3c, 0xfe, + 0xca, 0x79, 0x1c, 0x28, 0xe7, 0xe4, 0x7b, 0x8d, 0x4f, 0x4b, 0x3d, 0x0c, 0x1e, 0x1c, 0x48, 0xd9, + 0xbf, 0xbf, 0xc4, 0xf5, 0x7f, 0x2c, 0xbe, 0x21, 0x95, 0x07, 0x3a, 0xfa, 0x2e, 0x18, 0x6b, 0x3a, + 0x6d, 0xa7, 0xc9, 0x43, 0x46, 0xe6, 0x6a, 0x7f, 0x8c, 0x4a, 0xf3, 0xcb, 0xa2, 0x06, 0xd7, 0x66, + 0x7c, 0x56, 0x7e, 0xa5, 0x2c, 0xee, 0xab, 0xc1, 0x50, 0x4d, 0xce, 0xed, 0xc2, 0xa4, 0x41, 0xec, + 0x91, 0x5e, 0x7d, 0xbf, 0x8b, 0x1f, 0x17, 0xea, 0xc6, 0xb2, 0x07, 0xb3, 0xbe, 0xf6, 0x9f, 0x32, + 0x47, 0x29, 0x4e, 0x7f, 0xba, 0xdf, 0x81, 0xc0, 0x38, 0xa9, 0x66, 0x7a, 0x9e, 0x22, 0x83, 0xbb, + 0x29, 0xdb, 0x7f, 0xcf, 0x82, 0xc7, 0x75, 0x44, 0x2d, 0x38, 0x40, 0x3f, 0x7d, 0x72, 0x15, 0xc6, + 0x82, 0x36, 0x09, 0x9d, 0xe4, 0x4e, 0x76, 0x45, 0x0e, 0xfa, 0x6d, 0x51, 0x7e, 0x7c, 0x58, 0x39, + 0xaf, 0x53, 0x97, 0xe5, 0x58, 0xd5, 0x44, 0x36, 0x8c, 0xb0, 0xc1, 0x88, 0x44, 0xe0, 0x06, 0x66, + 0x13, 0xc7, 0x9e, 0x56, 0x23, 0x2c, 0x20, 0xf6, 0xf7, 0x58, 0x7c, 0x61, 0xe9, 0x5d, 0x47, 0x1f, + 0xc0, 0xcc, 0x1e, 0xbd, 0xbe, 0xad, 0x3c, 0x68, 0x87, 0x5c, 0x8d, 0x2e, 0xc7, 0xe9, 0xc5, 0x7e, + 0xe3, 0xa4, 0x7d, 0x64, 0x62, 0x39, 0xb5, 0x96, 0x22, 0x86, 0xbb, 0xc8, 0xdb, 0x7f, 0x5c, 0xe0, + 0x3b, 0x91, 0x49, 0x75, 0xcf, 0xc3, 0x68, 0x3b, 0x68, 0x2d, 0xd7, 0xaa, 0x58, 0x8c, 0x90, 0x62, + 0x57, 0x75, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, 0x05, 0x20, 0x0f, 0x62, 0x12, 0xfa, 0x8e, 0xa7, 0x0c, + 0x3f, 0x94, 0xf0, 0xb4, 0xa2, 0x20, 0x58, 0xc3, 0xa2, 0x75, 0xda, 0x61, 0xb0, 0xef, 0xb6, 0x98, + 0x6b, 0x5b, 0xd1, 0xac, 0x53, 0x57, 0x10, 0xac, 0x61, 0xd1, 0xab, 0x72, 0xc7, 0x8f, 0xf8, 0x01, + 0xe8, 0x6c, 0x8a, 0x58, 0x67, 0x63, 0xc9, 0x55, 0xf9, 0x8e, 0x0e, 0xc4, 0x26, 0x2e, 0x5a, 0x84, + 0x91, 0xd8, 0x61, 0xe6, 0x0c, 0xc3, 0xf9, 0x36, 0x70, 0x1b, 0x14, 0x43, 0x8f, 0x21, 0x48, 0x2b, + 0x60, 0x51, 0x11, 0xbd, 0x2b, 0x59, 0x30, 0x67, 0xc9, 0xc2, 0xf8, 0x34, 0x77, 0xd9, 0xea, 0xec, + 0x5b, 0xe7, 0xc1, 0xc2, 0xa8, 0xd5, 0xa0, 0x65, 0x7f, 0x77, 0x09, 0x20, 0x91, 0xf6, 0xd0, 0x87, + 
0x5d, 0x2c, 0xe2, 0xa5, 0xde, 0xf2, 0xe1, 0xe9, 0xf1, 0x07, 0xf4, 0xbd, 0x16, 0x8c, 0x3b, 0x9e, + 0x17, 0x34, 0x9d, 0x98, 0x8d, 0x72, 0xa1, 0x37, 0x8b, 0x12, 0xed, 0x2f, 0x26, 0x35, 0x78, 0x17, + 0x5e, 0x95, 0x96, 0x0a, 0x1a, 0xa4, 0x6f, 0x2f, 0xf4, 0x86, 0xd1, 0x67, 0xe5, 0x25, 0x80, 0x2f, + 0x8f, 0xb9, 0xf4, 0x25, 0xa0, 0xc4, 0xb8, 0xb1, 0x26, 0xff, 0xa3, 0x3b, 0x46, 0x50, 0xb0, 0xa1, + 0xfc, 0xf8, 0x07, 0x86, 0xd0, 0xd3, 0x2f, 0x1e, 0x18, 0xaa, 0xeb, 0x4e, 0x38, 0xc3, 0xf9, 0x41, + 0x42, 0x34, 0xe9, 0xba, 0x8f, 0x03, 0xce, 0xfb, 0x30, 0xdd, 0x32, 0x8f, 0x5b, 0xb1, 0x9a, 0x9e, + 0xcb, 0xa3, 0x9b, 0x3a, 0x9d, 0x93, 0x03, 0x36, 0x05, 0xc0, 0x69, 0xc2, 0xa8, 0xce, 0xdd, 0xa1, + 0x6a, 0xfe, 0x56, 0x20, 0x8c, 0x98, 0xed, 0xdc, 0xb9, 0x3c, 0x88, 0x62, 0xb2, 0x47, 0x31, 0x93, + 0x73, 0x74, 0x5d, 0xd4, 0xc5, 0x8a, 0x0a, 0x7a, 0x1b, 0x46, 0x98, 0x8f, 0x6a, 0x54, 0x1e, 0xcb, + 0xd7, 0x03, 0x9a, 0xe1, 0x15, 0x92, 0x4d, 0xc5, 0xfe, 0x46, 0x58, 0x50, 0x40, 0x37, 0x64, 0x0c, + 0x96, 0xa8, 0xe6, 0xdf, 0x89, 0x08, 0x8b, 0xc1, 0x52, 0x5a, 0xfa, 0x74, 0x12, 0x5e, 0x85, 0x97, + 0x67, 0x46, 0x0b, 0x36, 0x6a, 0x52, 0x79, 0x45, 0xfc, 0x97, 0x41, 0x88, 0xcb, 0x90, 0xdf, 0x3d, + 0x33, 0x50, 0x71, 0x32, 0x9c, 0x77, 0x4d, 0x12, 0x38, 0x4d, 0xf3, 0x4c, 0x8f, 0xcf, 0x39, 0x1f, + 0x66, 0xd2, 0x1b, 0xeb, 0x91, 0x1e, 0xd7, 0xbf, 0x3f, 0x04, 0x53, 0xe6, 0x42, 0x40, 0x0b, 0x50, + 0x12, 0x44, 0x54, 0x3c, 0x46, 0xb5, 0xb6, 0xd7, 0x24, 0x00, 0x27, 0x38, 0x2c, 0x1e, 0x25, 0xab, + 0xae, 0xd9, 0x01, 0x26, 0xf1, 0x28, 0x15, 0x04, 0x6b, 0x58, 0x54, 0x88, 0xde, 0x0c, 0x82, 0x58, + 0x1d, 0x05, 0x6a, 0xb5, 0x2c, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x08, 0xd8, 0x25, 0xa1, 0x4f, 0x3c, + 0x53, 0x93, 0xa9, 0x8e, 0x80, 0x9b, 0x3a, 0x10, 0x9b, 0xb8, 0xf4, 0x48, 0x0b, 0x22, 0xb6, 0xfc, + 0x84, 0xa8, 0x9e, 0xd8, 0x55, 0x36, 0xb8, 0x8f, 0xb6, 0x84, 0xa3, 0x2f, 0xc3, 0xe3, 0xca, 0xa5, + 0x1a, 0x73, 0xcd, 0xb0, 0x6c, 0x71, 0xc4, 0xb8, 0x59, 0x3f, 0xbe, 0x9c, 0x8d, 0x86, 0xf3, 0xea, + 0xa3, 0xb7, 0x60, 0x4a, 0x88, 0xc0, 0x92, 0xe2, 0xa8, 0x69, 0xac, 0x70, 0xd3, 0x80, 0xe2, 0x14, + 0x36, 0xaa, 0xc2, 0x0c, 0x2d, 0x61, 0x52, 0xa8, 0xa4, 0xc0, 0x5d, 0xc3, 0xd5, 0x59, 0x7f, 0x33, + 0x05, 0xc7, 0x5d, 0x35, 0xd0, 0x22, 0x4c, 0x73, 0x19, 0x85, 0xde, 0x29, 0xd9, 0x3c, 0x08, 0xdf, + 0x02, 0xb5, 0x11, 0x6e, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0xd7, 0x60, 0xc2, 0x09, 0x9b, 0x3b, 0x6e, + 0x4c, 0x9a, 0x71, 0x27, 0xe4, 0x4e, 0x07, 0x9a, 0xb5, 0xc7, 0xa2, 0x06, 0xc3, 0x06, 0xa6, 0xfd, + 0x21, 0x9c, 0xcb, 0x70, 0x4b, 0xa2, 0x0b, 0xc7, 0x69, 0xbb, 0xf2, 0x9b, 0x52, 0x16, 0x92, 0x8b, + 0xf5, 0x9a, 0xfc, 0x1a, 0x0d, 0x8b, 0xae, 0x4e, 0xa6, 0x12, 0xd7, 0x22, 0x85, 0xab, 0xd5, 0xb9, + 0x2a, 0x01, 0x38, 0xc1, 0xb1, 0x7f, 0x1d, 0x40, 0x53, 0xe8, 0x0c, 0x60, 0x1f, 0x77, 0x0d, 0x26, + 0x64, 0x78, 0x7b, 0x2d, 0xac, 0xb2, 0xfa, 0xcc, 0xeb, 0x1a, 0x0c, 0x1b, 0x98, 0xb4, 0x6f, 0xbe, + 0x0a, 0x0a, 0x9d, 0xb2, 0xc7, 0x4c, 0x42, 0x42, 0x27, 0x38, 0xe8, 0x25, 0x18, 0x8b, 0x88, 0xb7, + 0x75, 0xcb, 0xf5, 0x77, 0xc5, 0xc2, 0x56, 0x5c, 0xb8, 0x21, 0xca, 0xb1, 0xc2, 0x40, 0x4b, 0x50, + 0xec, 0xb8, 0x2d, 0xb1, 0x94, 0xe5, 0x81, 0x5f, 0xbc, 0x53, 0xab, 0x1e, 0x1f, 0x56, 0x9e, 0xce, + 0x8b, 0xda, 0x4f, 0xaf, 0xf6, 0xd1, 0x3c, 0xdd, 0x7e, 0xb4, 0x72, 0xd6, 0xdb, 0xc0, 0xc8, 0x09, + 0xdf, 0x06, 0xae, 0x02, 0x88, 0xaf, 0x96, 0x6b, 0xb9, 0x98, 0xcc, 0xda, 0x75, 0x05, 0xc1, 0x1a, + 0x16, 0x8a, 0x60, 0xb6, 0x19, 0x12, 0x47, 0xde, 0xa1, 0xb9, 0x83, 0xcd, 0xd8, 0xc3, 0x2b, 0x08, + 0x96, 0xd3, 0xc4, 0x70, 0x37, 0x7d, 0x14, 0xc0, 0x6c, 0x4b, 0x78, 0xf0, 0x27, 0x8d, 0x96, 0x4e, + 0xee, 0xd5, 0xc3, 0x0c, 
0x72, 0xd2, 0x84, 0x70, 0x37, 0x6d, 0xf4, 0x15, 0x98, 0x93, 0x85, 0xdd, + 0x41, 0x13, 0xd8, 0x76, 0x29, 0x2e, 0x5d, 0x3a, 0x3a, 0xac, 0xcc, 0x55, 0x73, 0xb1, 0x70, 0x0f, + 0x0a, 0x08, 0xc3, 0x08, 0x7b, 0x4b, 0x8a, 0xca, 0xe3, 0xec, 0x9c, 0x7b, 0x21, 0x5f, 0x19, 0x40, + 0xd7, 0xfa, 0x3c, 0x7b, 0x87, 0x12, 0x26, 0xe5, 0xc9, 0xb3, 0x1c, 0x2b, 0xc4, 0x82, 0x12, 0xda, + 0x82, 0x71, 0xc7, 0xf7, 0x83, 0xd8, 0xe1, 0x22, 0xd4, 0x44, 0xbe, 0xec, 0xa7, 0x11, 0x5e, 0x4c, + 0x6a, 0x70, 0xea, 0xca, 0x4a, 0x55, 0x83, 0x60, 0x9d, 0x30, 0xba, 0x0f, 0xd3, 0xc1, 0x7d, 0xca, + 0x1c, 0xa5, 0x96, 0x22, 0x2a, 0x4f, 0xb2, 0xb6, 0x5e, 0x1b, 0x50, 0x4f, 0x6b, 0x54, 0xd6, 0xb8, + 0x96, 0x49, 0x14, 0xa7, 0x5b, 0x41, 0xf3, 0x86, 0xb6, 0x7a, 0x2a, 0xf1, 0x9d, 0x48, 0xb4, 0xd5, + 0xba, 0x72, 0x9a, 0x05, 0xe1, 0xe0, 0x26, 0xd2, 0x6c, 0xf7, 0x4f, 0xa7, 0x82, 0x70, 0x24, 0x20, + 0xac, 0xe3, 0xa1, 0x1d, 0x98, 0x48, 0x9e, 0xac, 0xc2, 0x88, 0x85, 0x00, 0x1b, 0xbf, 0x7a, 0x75, + 0xb0, 0x8f, 0xab, 0x69, 0x35, 0xf9, 0xcd, 0x41, 0x2f, 0xc1, 0x06, 0xe5, 0xb9, 0x6f, 0x81, 0x71, + 0x6d, 0x62, 0x4f, 0xe2, 0x01, 0x30, 0xf7, 0x16, 0xcc, 0xa4, 0xa7, 0xee, 0x44, 0x1e, 0x04, 0xff, + 0xbb, 0x00, 0xd3, 0x19, 0x2f, 0x57, 0x2c, 0xf2, 0x7f, 0x8a, 0xa1, 0x26, 0x81, 0xfe, 0x4d, 0xb6, + 0x58, 0x18, 0x80, 0x2d, 0x4a, 0x1e, 0x5d, 0xcc, 0xe5, 0xd1, 0x82, 0x15, 0x0e, 0x7d, 0x14, 0x56, + 0x68, 0x9e, 0x3e, 0xc3, 0x03, 0x9d, 0x3e, 0xa7, 0xc0, 0x3e, 0x8d, 0x03, 0x6c, 0x74, 0x80, 0x03, + 0xec, 0x07, 0x0b, 0x30, 0x93, 0xb6, 0xf0, 0x3d, 0x83, 0xf7, 0x8e, 0xb7, 0x8d, 0xf7, 0x8e, 0xec, + 0x3c, 0x1a, 0x69, 0xbb, 0xe3, 0xbc, 0xb7, 0x0f, 0x9c, 0x7a, 0xfb, 0x78, 0x61, 0x20, 0x6a, 0xbd, + 0xdf, 0x41, 0xfe, 0x7e, 0x01, 0x2e, 0xa4, 0xab, 0x2c, 0x7b, 0x8e, 0xbb, 0x77, 0x06, 0x63, 0x73, + 0xdb, 0x18, 0x9b, 0x97, 0x07, 0xf9, 0x1a, 0xd6, 0xb5, 0xdc, 0x01, 0xba, 0x97, 0x1a, 0xa0, 0x85, + 0xc1, 0x49, 0xf6, 0x1e, 0xa5, 0x6f, 0x14, 0xe1, 0x52, 0x66, 0xbd, 0xe4, 0xb9, 0x60, 0xd5, 0x78, + 0x2e, 0xb8, 0x9a, 0x7a, 0x2e, 0xb0, 0x7b, 0xd7, 0x3e, 0x9d, 0xf7, 0x03, 0xe1, 0x7b, 0xcb, 0xc2, + 0x53, 0x3e, 0xe4, 0xdb, 0x81, 0xe1, 0x7b, 0xab, 0x08, 0x61, 0x93, 0xee, 0x9f, 0xa7, 0x37, 0x83, + 0x5f, 0xb7, 0xe0, 0x62, 0xe6, 0xdc, 0x9c, 0x81, 0x5e, 0x7d, 0xdd, 0xd4, 0xab, 0x3f, 0x3f, 0xf0, + 0x6a, 0xcd, 0x51, 0xb4, 0xff, 0x41, 0x31, 0xe7, 0x5b, 0x98, 0x66, 0xf2, 0x36, 0x8c, 0x3b, 0xcd, + 0x26, 0x89, 0xa2, 0xb5, 0xa0, 0xa5, 0xc2, 0x35, 0xbe, 0xcc, 0xa4, 0x8d, 0xa4, 0xf8, 0xf8, 0xb0, + 0x32, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0xc1, 0x8c, 0x30, 0x5b, 0x38, 0xd5, 0x08, 0xb3, 0x57, + 0x01, 0xf6, 0x95, 0xbe, 0x22, 0xad, 0xe6, 0xd4, 0x34, 0x19, 0x1a, 0x16, 0xfa, 0x0e, 0x76, 0x0b, + 0xe0, 0xc6, 0x40, 0x7c, 0x29, 0xbe, 0x3a, 0xe0, 0x5c, 0xe9, 0x86, 0x45, 0x3c, 0xc8, 0x83, 0x52, + 0x09, 0x2b, 0x92, 0xe8, 0xdb, 0x60, 0x26, 0xe2, 0x31, 0x84, 0x96, 0x3d, 0x27, 0x62, 0x4e, 0x5c, + 0x62, 0x15, 0xb2, 0xc8, 0x0d, 0x8d, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xf9, 0x51, 0x2c, 0xe0, + 0x11, 0x5f, 0x98, 0xcf, 0x26, 0x1f, 0x24, 0xf2, 0x0e, 0x9d, 0x4f, 0x0f, 0x3f, 0x1b, 0x78, 0xad, + 0xa6, 0xfd, 0x83, 0x43, 0xf0, 0x44, 0x0f, 0x26, 0x86, 0x16, 0x4d, 0x23, 0x80, 0x17, 0xd3, 0xfa, + 0xbf, 0xb9, 0xcc, 0xca, 0x86, 0x42, 0x30, 0xb5, 0x56, 0x0a, 0x1f, 0x79, 0xad, 0x7c, 0x9f, 0xa5, + 0x69, 0x66, 0xb9, 0xa9, 0xf0, 0x17, 0x4f, 0xc8, 0x9c, 0x4f, 0x51, 0x55, 0xbb, 0x95, 0xa1, 0xef, + 0xbc, 0x3a, 0x70, 0x77, 0x06, 0x56, 0x80, 0x9e, 0xed, 0x93, 0xd1, 0xd7, 0x2c, 0x78, 0x3a, 0xb3, + 0xbf, 0x86, 0xd1, 0xd2, 0x02, 0x94, 0x9a, 0xb4, 0x50, 0x73, 0x0c, 0x4d, 0xdc, 0xb3, 0x25, 0x00, + 0x27, 0x38, 0x86, 0x6d, 0x52, 0xa1, 0xaf, 0x6d, 
0xd2, 0xbf, 0xb6, 0xa0, 0x6b, 0x01, 0x9f, 0x01, + 0x27, 0xad, 0x99, 0x9c, 0xf4, 0xd3, 0x83, 0xcc, 0x65, 0x0e, 0x13, 0xfd, 0xad, 0x69, 0x78, 0x2c, + 0xc7, 0x13, 0x6c, 0x1f, 0x66, 0xb7, 0x9b, 0xc4, 0x74, 0xb9, 0x15, 0x1f, 0x93, 0xe9, 0x9d, 0xdc, + 0xd3, 0x3f, 0x97, 0x5f, 0x88, 0xbb, 0x50, 0x70, 0x77, 0x13, 0xe8, 0x6b, 0x16, 0x9c, 0x77, 0xee, + 0x47, 0x5d, 0x69, 0x01, 0xc5, 0x9a, 0x79, 0x2d, 0x53, 0x4f, 0xdb, 0x27, 0x8d, 0x20, 0x73, 0x8b, + 0x3b, 0x9f, 0x85, 0x85, 0x33, 0xdb, 0x42, 0x58, 0x44, 0xd8, 0xa5, 0xf2, 0x76, 0x0f, 0xa7, 0xf0, + 0x2c, 0x97, 0x3d, 0xce, 0x53, 0x25, 0x04, 0x2b, 0x3a, 0xe8, 0x2e, 0x94, 0xb6, 0xa5, 0x1f, 0xad, + 0xe0, 0xd9, 0x99, 0x87, 0x60, 0xa6, 0xb3, 0x2d, 0xf7, 0x1d, 0x51, 0x20, 0x9c, 0x90, 0x42, 0x6f, + 0x41, 0xd1, 0xdf, 0x8a, 0x7a, 0x65, 0x36, 0x4a, 0xd9, 0xf2, 0x71, 0xef, 0xfe, 0xf5, 0xd5, 0x06, + 0xa6, 0x15, 0xd1, 0x0d, 0x28, 0x86, 0x9b, 0x2d, 0xf1, 0xb4, 0x90, 0x29, 0x97, 0xe2, 0xa5, 0x6a, + 0xf6, 0x22, 0xe1, 0x94, 0xf0, 0x52, 0x15, 0x53, 0x12, 0xa8, 0x0e, 0xc3, 0xcc, 0x69, 0x4a, 0xbc, + 0x20, 0x64, 0x0a, 0xa4, 0x3d, 0x9c, 0x0f, 0x79, 0x08, 0x00, 0x86, 0x80, 0x39, 0x21, 0xf4, 0x36, + 0x8c, 0x34, 0x59, 0xf2, 0x1f, 0xa1, 0xf8, 0xc9, 0x8e, 0x0d, 0xd5, 0x95, 0x1e, 0x88, 0xbf, 0xa0, + 0xf2, 0x72, 0x2c, 0x28, 0xa0, 0x0d, 0x18, 0x69, 0x92, 0xf6, 0xce, 0x56, 0x24, 0xf4, 0x39, 0x9f, + 0xcd, 0xa4, 0xd5, 0x23, 0xd7, 0x95, 0xa0, 0xca, 0x30, 0xb0, 0xa0, 0x85, 0x3e, 0x0f, 0x85, 0xad, + 0xa6, 0xf0, 0xa4, 0xca, 0x7c, 0x43, 0x30, 0xc3, 0x32, 0x2c, 0x8d, 0x1c, 0x1d, 0x56, 0x0a, 0xab, + 0xcb, 0xb8, 0xb0, 0xd5, 0x44, 0xeb, 0x30, 0xba, 0xc5, 0x7d, 0xeb, 0x45, 0xb4, 0x94, 0xe7, 0xb2, + 0xdd, 0xfe, 0xbb, 0xdc, 0xef, 0xb9, 0x07, 0x90, 0x00, 0x60, 0x49, 0x04, 0x6d, 0x00, 0x6c, 0xa9, + 0x18, 0x01, 0x22, 0x4a, 0xfb, 0xa7, 0x07, 0x89, 0x24, 0x20, 0x94, 0x1b, 0xaa, 0x14, 0x6b, 0x74, + 0xd0, 0x57, 0xa1, 0xe4, 0xc8, 0xe4, 0x73, 0x22, 0xce, 0xca, 0xab, 0x99, 0x9b, 0xb0, 0x77, 0x5e, + 0x3e, 0xbe, 0x82, 0x15, 0x12, 0x4e, 0x88, 0xa2, 0x5d, 0x98, 0xdc, 0x8f, 0xda, 0x3b, 0x44, 0x6e, + 0x5a, 0x16, 0x76, 0x25, 0xe7, 0x90, 0xba, 0x2b, 0x10, 0xdd, 0x30, 0xee, 0x38, 0x5e, 0x17, 0x9f, + 0x61, 0xee, 0x62, 0x77, 0x75, 0x62, 0xd8, 0xa4, 0x4d, 0x07, 0xfd, 0x83, 0x4e, 0xb0, 0x79, 0x10, + 0x13, 0x11, 0xcc, 0x3d, 0x73, 0xd0, 0xdf, 0xe1, 0x28, 0xdd, 0x83, 0x2e, 0x00, 0x58, 0x12, 0xa1, + 0xdb, 0xda, 0x91, 0x89, 0x1d, 0x85, 0x06, 0xe7, 0xf9, 0xdc, 0xe1, 0xe9, 0xea, 0x6f, 0x32, 0x28, + 0x8c, 0x1f, 0x26, 0xa4, 0x18, 0x1f, 0x6c, 0xef, 0x04, 0x71, 0xe0, 0xa7, 0x78, 0xf0, 0x6c, 0x3e, + 0x1f, 0xac, 0x67, 0xe0, 0x77, 0xf3, 0xc1, 0x2c, 0x2c, 0x9c, 0xd9, 0x16, 0x6a, 0xc1, 0x54, 0x3b, + 0x08, 0xe3, 0xfb, 0x41, 0x28, 0x57, 0x15, 0xea, 0x71, 0xb5, 0x37, 0x30, 0x45, 0x8b, 0xcc, 0xfa, + 0xdb, 0x84, 0xe0, 0x14, 0x4d, 0xf4, 0x25, 0x18, 0x8d, 0x9a, 0x8e, 0x47, 0x6a, 0xb7, 0xcb, 0xe7, + 0xf2, 0x0f, 0x98, 0x06, 0x47, 0xc9, 0x59, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, 0xd0, 0x2a, + 0x0c, 0xb3, 0x0c, 0x21, 0x2c, 0x0e, 0x7d, 0x4e, 0x3c, 0xaf, 0x2e, 0x0b, 0x69, 0xce, 0x87, 0x58, + 0x31, 0xe6, 0xd5, 0xe9, 0x1e, 0x10, 0x12, 0x6e, 0x10, 0x95, 0x2f, 0xe4, 0xef, 0x01, 0x21, 0x18, + 0xdf, 0x6e, 0xf4, 0xda, 0x03, 0x0a, 0x09, 0x27, 0x44, 0x29, 0x17, 0xa6, 0x9c, 0xf3, 0xb1, 0x7c, + 0x2e, 0x9c, 0xcf, 0x37, 0x19, 0x17, 0xa6, 0x5c, 0x93, 0x92, 0xb0, 0xbf, 0x36, 0xda, 0x2d, 0x95, + 0xb0, 0x3b, 0xd1, 0x77, 0x5b, 0x5d, 0x06, 0x03, 0x9f, 0x1b, 0x54, 0x45, 0x73, 0x8a, 0xf2, 0xe8, + 0xd7, 0x2c, 0x78, 0xac, 0x9d, 0xf9, 0x21, 0xe2, 0x88, 0x1f, 0x4c, 0xd3, 0xc3, 0x3f, 0x5d, 0xe5, + 0x8a, 0xc8, 0x86, 0xe3, 0x9c, 0x96, 0xd2, 0x32, 0x7f, 0xf1, 0x23, 0xcb, 
0xfc, 0x6b, 0x30, 0xc6, + 0xc4, 0xc8, 0x24, 0x78, 0xdc, 0x40, 0x66, 0x77, 0x4c, 0x58, 0x58, 0x16, 0x15, 0xb1, 0x22, 0x81, + 0xbe, 0xdf, 0x82, 0xa7, 0xd2, 0x5d, 0xc7, 0x84, 0x81, 0x45, 0x20, 0x62, 0x7e, 0x1d, 0x5b, 0x15, + 0xdf, 0xff, 0x54, 0xbd, 0x17, 0xf2, 0x71, 0x3f, 0x04, 0xdc, 0xbb, 0x31, 0x54, 0xcd, 0xb8, 0x0f, + 0x8e, 0x98, 0xef, 0x89, 0x03, 0xdc, 0x09, 0x5f, 0x83, 0x89, 0xbd, 0xa0, 0xe3, 0x4b, 0x9f, 0x18, + 0xe1, 0xf1, 0xcc, 0x74, 0xd7, 0x6b, 0x5a, 0x39, 0x36, 0xb0, 0x52, 0x37, 0xc9, 0xb1, 0x87, 0xbd, + 0x49, 0x9e, 0xed, 0xfd, 0xe4, 0xeb, 0x56, 0x86, 0x60, 0xcd, 0x6f, 0xac, 0x5f, 0x30, 0x6f, 0xac, + 0xcf, 0xa6, 0x6f, 0xac, 0x5d, 0x1a, 0x4a, 0xe3, 0xb2, 0x3a, 0x78, 0xa0, 0xf6, 0x41, 0xa3, 0xf4, + 0xd9, 0x1e, 0x5c, 0xee, 0x77, 0x70, 0x30, 0x13, 0xc6, 0x96, 0x7a, 0xdb, 0x4f, 0x4c, 0x18, 0x5b, + 0xb5, 0x2a, 0x66, 0x90, 0x41, 0x23, 0xeb, 0xd8, 0xff, 0xdd, 0x82, 0x62, 0x3d, 0x68, 0x9d, 0x81, + 0xc6, 0xf5, 0x8b, 0x86, 0xc6, 0xf5, 0x89, 0x9c, 0x14, 0xd6, 0xb9, 0xfa, 0xd5, 0x95, 0x94, 0x7e, + 0xf5, 0xa9, 0x3c, 0x02, 0xbd, 0xb5, 0xa9, 0x3f, 0x5e, 0x04, 0x3d, 0xe1, 0x36, 0xfa, 0xb7, 0x0f, + 0x63, 0x0b, 0x5f, 0xec, 0x95, 0x83, 0x5b, 0x50, 0x66, 0x96, 0x8f, 0xd2, 0xcd, 0xf6, 0xcf, 0x98, + 0x49, 0xfc, 0x3d, 0xe2, 0x6e, 0xef, 0xc4, 0xa4, 0x95, 0xfe, 0x9c, 0xb3, 0x33, 0x89, 0xff, 0xaf, + 0x16, 0x4c, 0xa7, 0x5a, 0x47, 0x5e, 0x96, 0xcf, 0xde, 0x43, 0x6a, 0xda, 0x66, 0xfb, 0x3a, 0xf9, + 0xcd, 0x03, 0xa8, 0xe7, 0x2c, 0xa9, 0x85, 0x62, 0x72, 0xb9, 0x7a, 0xef, 0x8a, 0xb0, 0x86, 0x81, + 0x5e, 0x87, 0xf1, 0x38, 0x68, 0x07, 0x5e, 0xb0, 0x7d, 0x70, 0x93, 0xc8, 0x58, 0x4e, 0xea, 0xd1, + 0x71, 0x23, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0xc9, 0x22, 0xa4, 0x93, 0xb4, 0x7f, 0x73, 0x4d, 0x7e, + 0x32, 0xd7, 0xe4, 0x37, 0x2c, 0x98, 0xa1, 0xad, 0x33, 0xab, 0x32, 0x79, 0x1c, 0xaa, 0x84, 0x51, + 0x56, 0x8f, 0x84, 0x51, 0xcf, 0x52, 0xde, 0xd5, 0x0a, 0x3a, 0xb1, 0xd0, 0x62, 0x69, 0xcc, 0x89, + 0x96, 0x62, 0x01, 0x15, 0x78, 0x24, 0x0c, 0x85, 0x27, 0x9e, 0x8e, 0x47, 0xc2, 0x10, 0x0b, 0xa8, + 0xcc, 0x27, 0x35, 0x94, 0x93, 0x4f, 0x8a, 0x85, 0x41, 0x14, 0x96, 0x4c, 0x42, 0x30, 0xd1, 0xc2, + 0x20, 0x4a, 0x13, 0xa7, 0x04, 0xc7, 0xfe, 0x99, 0x22, 0x4c, 0xd4, 0x83, 0x56, 0xf2, 0xa0, 0xf4, + 0x9a, 0xf1, 0xa0, 0x74, 0x39, 0xf5, 0xa0, 0x34, 0xa3, 0xe3, 0x7e, 0xf3, 0xf9, 0xe8, 0xe3, 0x7a, + 0x3e, 0xfa, 0x57, 0x16, 0x9b, 0xb5, 0xea, 0x7a, 0x43, 0xe4, 0x3b, 0x7e, 0x05, 0xc6, 0x19, 0x43, + 0x62, 0xae, 0x9f, 0xf2, 0x95, 0x85, 0xa5, 0x35, 0x58, 0x4f, 0x8a, 0xb1, 0x8e, 0x83, 0xae, 0xc0, + 0x58, 0x44, 0x9c, 0xb0, 0xb9, 0xa3, 0x78, 0x9c, 0x78, 0x83, 0xe0, 0x65, 0x58, 0x41, 0xd1, 0x3b, + 0x49, 0x50, 0xc4, 0x62, 0x7e, 0xe6, 0x5e, 0xbd, 0x3f, 0x7c, 0x8b, 0xe4, 0x47, 0x42, 0xb4, 0xef, + 0x01, 0xea, 0xc6, 0x1f, 0xc0, 0xbc, 0xab, 0x62, 0x86, 0x3f, 0x2b, 0x75, 0x85, 0x3e, 0xfb, 0x13, + 0x0b, 0xa6, 0xea, 0x41, 0x8b, 0x6e, 0xdd, 0x3f, 0x4f, 0xfb, 0x54, 0x0f, 0x3f, 0x3a, 0xd2, 0x23, + 0xfc, 0xe8, 0x3f, 0xb0, 0x60, 0xb4, 0x1e, 0xb4, 0xce, 0x40, 0xf7, 0xfd, 0x05, 0x53, 0xf7, 0xfd, + 0x78, 0xce, 0x92, 0xc8, 0x51, 0x77, 0xff, 0x5c, 0x11, 0x26, 0x69, 0x3f, 0x83, 0x6d, 0x39, 0x4b, + 0xc6, 0x88, 0x58, 0x03, 0x8c, 0x08, 0x15, 0x73, 0x03, 0xcf, 0x0b, 0xee, 0xa7, 0x67, 0x6c, 0x95, + 0x95, 0x62, 0x01, 0x45, 0x2f, 0xc1, 0x58, 0x3b, 0x24, 0xfb, 0x6e, 0xd0, 0x89, 0xd2, 0x5e, 0xce, + 0x75, 0x51, 0x8e, 0x15, 0x06, 0xbd, 0x19, 0x45, 0xae, 0xdf, 0x24, 0xd2, 0xee, 0x6b, 0x88, 0xd9, + 0x7d, 0xf1, 0xb8, 0xe2, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x3d, 0x28, 0xb1, 0xff, 0x8c, 0xa3, 0x9c, + 0x3c, 0xd3, 0x95, 0x48, 0xe6, 0x21, 0x08, 0xe0, 0x84, 0x16, 0xba, 0x0a, 0x10, 0x4b, 0x0b, 0xb5, + 
0x48, 0x38, 0xe1, 0x2b, 0x59, 0x5b, 0xd9, 0xae, 0x45, 0x58, 0xc3, 0x42, 0x2f, 0x42, 0x29, 0x76, + 0x5c, 0xef, 0x96, 0xeb, 0x93, 0x48, 0x58, 0xf8, 0x89, 0x5c, 0x1d, 0xa2, 0x10, 0x27, 0x70, 0x2a, + 0xeb, 0xb0, 0x10, 0x0f, 0x3c, 0x4f, 0xde, 0x18, 0xc3, 0x66, 0xb2, 0xce, 0x2d, 0x55, 0x8a, 0x35, + 0x0c, 0xfb, 0x1a, 0x5c, 0xa8, 0x07, 0xad, 0x7a, 0x10, 0xc6, 0xab, 0x41, 0x78, 0xdf, 0x09, 0x5b, + 0x72, 0xfe, 0x2a, 0x32, 0x6d, 0x04, 0xe5, 0x3d, 0xc3, 0x7c, 0x67, 0x1a, 0x09, 0x21, 0x5e, 0x65, + 0xd2, 0xce, 0x09, 0xdd, 0xb1, 0xfe, 0x7d, 0x81, 0x31, 0x8a, 0x54, 0xf2, 0x46, 0xf4, 0x15, 0x98, + 0x8a, 0xc8, 0x2d, 0xd7, 0xef, 0x3c, 0x90, 0x37, 0xd8, 0x1e, 0xbe, 0x6e, 0x8d, 0x15, 0x1d, 0x93, + 0xeb, 0xc1, 0xcc, 0x32, 0x9c, 0xa2, 0x46, 0x87, 0x30, 0xec, 0xf8, 0x8b, 0xd1, 0x9d, 0x88, 0x84, + 0x22, 0x79, 0x20, 0x1b, 0x42, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x4b, 0x86, 0xfd, 0x59, 0x0f, 0x7c, + 0x1c, 0x04, 0xb1, 0x5c, 0x64, 0x2c, 0xfd, 0x94, 0x56, 0x8e, 0x0d, 0x2c, 0xb4, 0x0a, 0x28, 0xea, + 0xb4, 0xdb, 0x1e, 0x7b, 0x98, 0x76, 0xbc, 0xeb, 0x61, 0xd0, 0x69, 0xf3, 0x47, 0x41, 0x91, 0xb9, + 0xa9, 0xd1, 0x05, 0xc5, 0x19, 0x35, 0x28, 0x63, 0xd8, 0x8a, 0xd8, 0x6f, 0x11, 0xe5, 0x81, 0x6b, + 0xa4, 0x1b, 0xac, 0x08, 0x4b, 0x98, 0xfd, 0x5d, 0xec, 0xc0, 0x60, 0x39, 0xdf, 0xe2, 0x4e, 0x48, + 0xd0, 0x1e, 0x4c, 0xb6, 0xd9, 0x51, 0x2e, 0xa2, 0x67, 0x8b, 0x01, 0x7c, 0x38, 0x7b, 0x3e, 0x9e, + 0x03, 0x4a, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x7f, 0x9a, 0x66, 0x7c, 0xa9, 0xc1, 0xaf, 0x73, 0xa3, + 0xc2, 0x4a, 0x5f, 0xc8, 0xae, 0x73, 0xf9, 0x59, 0x22, 0x93, 0x23, 0x44, 0x58, 0xfa, 0x63, 0x59, + 0x17, 0xbd, 0xc3, 0x5e, 0x53, 0x39, 0x33, 0xe8, 0x97, 0x3c, 0x9a, 0x63, 0x19, 0x0f, 0xa7, 0xa2, + 0x22, 0xd6, 0x88, 0xa0, 0x5b, 0x30, 0x29, 0x52, 0x84, 0x09, 0xd5, 0x4e, 0xd1, 0x50, 0x0c, 0x4c, + 0x62, 0x1d, 0x78, 0x9c, 0x2e, 0xc0, 0x66, 0x65, 0xb4, 0x0d, 0x4f, 0x69, 0xf9, 0x32, 0x33, 0x6c, + 0x4a, 0x39, 0x6f, 0x79, 0xfa, 0xe8, 0xb0, 0xf2, 0xd4, 0x46, 0x2f, 0x44, 0xdc, 0x9b, 0x0e, 0xba, + 0x0d, 0x17, 0x9c, 0x66, 0xec, 0xee, 0x93, 0x2a, 0x71, 0x5a, 0x9e, 0xeb, 0x13, 0x33, 0xec, 0xc7, + 0xc5, 0xa3, 0xc3, 0xca, 0x85, 0xc5, 0x2c, 0x04, 0x9c, 0x5d, 0x0f, 0x7d, 0x01, 0x4a, 0x2d, 0x3f, + 0x12, 0x63, 0x30, 0x62, 0xa4, 0x82, 0x2d, 0x55, 0xd7, 0x1b, 0xea, 0xfb, 0x93, 0x3f, 0x38, 0xa9, + 0x80, 0xb6, 0x61, 0x42, 0x77, 0xed, 0x13, 0x69, 0x84, 0x5f, 0xee, 0x71, 0xeb, 0x37, 0xfc, 0xe1, + 0xb8, 0x5e, 0x53, 0x59, 0x6c, 0x1b, 0xae, 0x72, 0x06, 0x61, 0xf4, 0x36, 0x20, 0x2a, 0xcc, 0xb8, + 0x4d, 0xb2, 0xd8, 0x64, 0x41, 0xcc, 0x99, 0x36, 0x6c, 0xcc, 0x70, 0x3f, 0x42, 0x8d, 0x2e, 0x0c, + 0x9c, 0x51, 0x0b, 0xdd, 0xa0, 0x1c, 0x45, 0x2f, 0x15, 0x06, 0xf6, 0x52, 0x00, 0x2e, 0x57, 0x49, + 0x3b, 0x24, 0x4d, 0x27, 0x26, 0x2d, 0x93, 0x22, 0x4e, 0xd5, 0xa3, 0xe7, 0x8d, 0xca, 0x67, 0x04, + 0xa6, 0x59, 0x78, 0x77, 0x4e, 0x23, 0x7a, 0x77, 0xdc, 0x09, 0xa2, 0x78, 0x9d, 0xc4, 0xf7, 0x83, + 0x70, 0x57, 0xc4, 0xea, 0x4b, 0xc2, 0xc6, 0x26, 0x20, 0xac, 0xe3, 0x51, 0x59, 0x91, 0x3d, 0x67, + 0xd6, 0xaa, 0xec, 0x75, 0x69, 0x2c, 0xd9, 0x27, 0x37, 0x78, 0x31, 0x96, 0x70, 0x89, 0x5a, 0xab, + 0x2f, 0xb3, 0x37, 0xa3, 0x14, 0x6a, 0xad, 0xbe, 0x8c, 0x25, 0x1c, 0x91, 0xee, 0x34, 0xbb, 0x53, + 0xf9, 0xaf, 0x7d, 0xdd, 0x7c, 0x79, 0xc0, 0x4c, 0xbb, 0x3e, 0xcc, 0xa8, 0x04, 0xbf, 0x3c, 0x88, + 0x61, 0x54, 0x9e, 0x66, 0x8b, 0x64, 0xf0, 0x08, 0x88, 0x4a, 0xdb, 0x59, 0x4b, 0x51, 0xc2, 0x5d, + 0xb4, 0x8d, 0x70, 0x32, 0x33, 0x7d, 0xf3, 0x51, 0x2d, 0x40, 0x29, 0xea, 0x6c, 0xb6, 0x82, 0x3d, + 0xc7, 0xf5, 0xd9, 0x13, 0x8f, 0x26, 0x88, 0x34, 0x24, 0x00, 0x27, 0x38, 0x68, 0x15, 0xc6, 0x1c, + 0x71, 0x2d, 0x15, 0x8f, 
0x32, 0x99, 0xf1, 0x25, 0xe4, 0xd5, 0x95, 0x8b, 0xd9, 0xf2, 0x1f, 0x56, + 0x75, 0xd1, 0x9b, 0x30, 0x29, 0x5c, 0x20, 0x85, 0xf5, 0xf2, 0x39, 0xd3, 0x5b, 0xa6, 0xa1, 0x03, + 0xb1, 0x89, 0x8b, 0xbe, 0x03, 0xa6, 0x28, 0x95, 0x84, 0xb1, 0x95, 0xcf, 0x0f, 0xc2, 0x11, 0xb5, + 0x3c, 0x23, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0x5a, 0xf0, 0xa4, 0xd3, 0x89, 0x03, 0xa6, 0x0e, 0x36, + 0xd7, 0xff, 0x46, 0xb0, 0x4b, 0x7c, 0xf6, 0x12, 0x33, 0xb6, 0x74, 0xf9, 0xe8, 0xb0, 0xf2, 0xe4, + 0x62, 0x0f, 0x3c, 0xdc, 0x93, 0x0a, 0xba, 0x03, 0xe3, 0x71, 0xe0, 0x09, 0xb7, 0x83, 0xa8, 0xfc, + 0x58, 0x7e, 0x38, 0xac, 0x0d, 0x85, 0xa6, 0x2b, 0x5a, 0x54, 0x55, 0xac, 0xd3, 0x41, 0x1b, 0x7c, + 0x8f, 0xb1, 0x40, 0xc1, 0x24, 0x2a, 0x3f, 0x9e, 0x3f, 0x30, 0x2a, 0x9e, 0xb0, 0xb9, 0x05, 0x45, + 0x4d, 0xac, 0x93, 0x41, 0xd7, 0x61, 0xb6, 0x1d, 0xba, 0x01, 0x5b, 0xd8, 0x4a, 0x15, 0x5f, 0x36, + 0x53, 0x4b, 0xd4, 0xd3, 0x08, 0xb8, 0xbb, 0x0e, 0xbd, 0x88, 0xc9, 0xc2, 0xf2, 0x45, 0x9e, 0xa7, + 0x8c, 0x0b, 0xa7, 0xbc, 0x0c, 0x2b, 0x28, 0x5a, 0x63, 0x7c, 0x99, 0x5f, 0x99, 0xca, 0x73, 0xf9, + 0x71, 0x39, 0xf4, 0xab, 0x15, 0x17, 0x5c, 0xd4, 0x5f, 0x9c, 0x50, 0x98, 0xfb, 0x56, 0x98, 0xed, + 0x62, 0xbc, 0x27, 0xb2, 0x28, 0xff, 0xa7, 0xc3, 0x50, 0x52, 0x7a, 0x57, 0xb4, 0x60, 0xaa, 0xd3, + 0x2f, 0xa6, 0xd5, 0xe9, 0x63, 0x54, 0xfc, 0xd3, 0x35, 0xe8, 0x1b, 0x86, 0x3d, 0x54, 0x21, 0x3f, + 0xdd, 0x98, 0xae, 0x74, 0xe8, 0xeb, 0xfe, 0xa9, 0x5d, 0xa3, 0x8b, 0x03, 0xeb, 0xe5, 0x87, 0x7a, + 0xde, 0xcc, 0x07, 0xcc, 0xa0, 0x4c, 0x6f, 0x9a, 0xed, 0xa0, 0x55, 0xab, 0xa7, 0x53, 0x8a, 0xd6, + 0x69, 0x21, 0xe6, 0x30, 0x76, 0x57, 0xa0, 0x52, 0x02, 0xbb, 0x2b, 0x8c, 0x3e, 0xe4, 0x5d, 0x41, + 0x12, 0xc0, 0x09, 0x2d, 0xe4, 0xc1, 0x6c, 0xd3, 0xcc, 0x06, 0xab, 0x5c, 0x3e, 0x9f, 0xe9, 0x9b, + 0x97, 0xb5, 0xa3, 0xa5, 0x89, 0x5b, 0x4e, 0x53, 0xc1, 0xdd, 0x84, 0xd1, 0x9b, 0x30, 0xf6, 0x41, + 0x10, 0xb1, 0x55, 0x2c, 0x8e, 0x4a, 0xe9, 0x64, 0x37, 0xf6, 0xce, 0xed, 0x06, 0x2b, 0x3f, 0x3e, + 0xac, 0x8c, 0xd7, 0x83, 0x96, 0xfc, 0x8b, 0x55, 0x05, 0xf4, 0x00, 0x2e, 0x18, 0x0c, 0x46, 0x75, + 0x17, 0x06, 0xef, 0xee, 0x53, 0xa2, 0xb9, 0x0b, 0xb5, 0x2c, 0x4a, 0x38, 0xbb, 0x01, 0xfb, 0x17, + 0xb9, 0x76, 0x59, 0xe8, 0xa0, 0x48, 0xd4, 0xf1, 0xce, 0x22, 0x17, 0xd4, 0x8a, 0xa1, 0x1e, 0x7b, + 0xe8, 0x17, 0x8c, 0x5f, 0xb5, 0xd8, 0x0b, 0xc6, 0x06, 0xd9, 0x6b, 0x7b, 0x4e, 0x7c, 0x16, 0x7e, + 0x04, 0xef, 0xc0, 0x58, 0x2c, 0x5a, 0xeb, 0x95, 0xbe, 0x4a, 0xeb, 0x14, 0x7b, 0xc5, 0x51, 0xe7, + 0xab, 0x2c, 0xc5, 0x8a, 0x8c, 0xfd, 0xcf, 0xf9, 0x0c, 0x48, 0xc8, 0x19, 0xa8, 0x2a, 0xaa, 0xa6, + 0xaa, 0xa2, 0xd2, 0xe7, 0x0b, 0x72, 0x54, 0x16, 0xff, 0xcc, 0xec, 0x37, 0xbb, 0xca, 0x7c, 0xd2, + 0x9f, 0xce, 0xec, 0x1f, 0xb6, 0xe0, 0x7c, 0x96, 0x35, 0x08, 0x95, 0x89, 0xf8, 0x45, 0x4a, 0x3d, + 0x25, 0xaa, 0x11, 0xbc, 0x2b, 0xca, 0xb1, 0xc2, 0x18, 0x38, 0x59, 0xc7, 0xc9, 0x22, 0xca, 0xdd, + 0x06, 0x33, 0x71, 0x30, 0x7a, 0x8b, 0x3b, 0x06, 0x59, 0x2a, 0xb3, 0xef, 0xc9, 0x9c, 0x82, 0xec, + 0x9f, 0x2a, 0xc0, 0x79, 0xfe, 0x16, 0xb0, 0xb8, 0x1f, 0xb8, 0xad, 0x7a, 0xd0, 0x12, 0x6e, 0x52, + 0xef, 0xc2, 0x44, 0x5b, 0xbb, 0xfd, 0xf6, 0x8a, 0x69, 0xa5, 0xdf, 0x92, 0x93, 0x5b, 0x88, 0x5e, + 0x8a, 0x0d, 0x5a, 0xa8, 0x05, 0x13, 0x64, 0xdf, 0x6d, 0x2a, 0x85, 0x72, 0xe1, 0xc4, 0x2c, 0x5d, + 0xb5, 0xb2, 0xa2, 0xd1, 0xc1, 0x06, 0xd5, 0x47, 0x90, 0xe8, 0xcd, 0xfe, 0x11, 0x0b, 0x1e, 0xcf, + 0x89, 0x80, 0x45, 0x9b, 0xbb, 0xcf, 0x5e, 0x5d, 0x44, 0xce, 0x28, 0xd5, 0x1c, 0x7f, 0x8b, 0xc1, + 0x02, 0x8a, 0xbe, 0x04, 0xc0, 0xdf, 0x52, 0xa8, 0x50, 0x2e, 0x3e, 0x7d, 0xb0, 0xc8, 0x30, 0x5a, + 0xf8, 0x10, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x9f, 
0x28, 0xc2, 0x30, 0xd3, 0xdd, 0xa3, 0x55, 0x18, + 0xdd, 0xe1, 0xf1, 0xb6, 0x07, 0x09, 0xed, 0x9d, 0xdc, 0x6e, 0x78, 0x01, 0x96, 0x95, 0xd1, 0x1a, + 0x9c, 0x13, 0xae, 0x78, 0x55, 0xe2, 0x39, 0x07, 0xf2, 0x92, 0xcc, 0xf3, 0x2c, 0xa9, 0xcc, 0x62, + 0xb5, 0x6e, 0x14, 0x9c, 0x55, 0x0f, 0xbd, 0xd5, 0x15, 0x65, 0x93, 0x47, 0x2a, 0x57, 0x22, 0x75, + 0x9f, 0x48, 0x9b, 0x6f, 0xc2, 0x64, 0xbb, 0x4b, 0x1d, 0x30, 0x9c, 0x88, 0xfb, 0xa6, 0x0a, 0xc0, + 0xc4, 0x65, 0x66, 0x20, 0x1d, 0x66, 0xf4, 0xb2, 0xb1, 0x13, 0x92, 0x68, 0x27, 0xf0, 0x5a, 0x22, + 0xf5, 0x79, 0x62, 0x06, 0x92, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x7a, 0x9d, 0x90, + 0x24, 0x54, 0x46, 0x4c, 0x2a, 0xab, 0x29, 0x38, 0xee, 0xaa, 0x41, 0xd7, 0xd1, 0x05, 0x91, 0x37, + 0x5b, 0x06, 0x68, 0x50, 0xb6, 0x3d, 0xa3, 0xd2, 0x51, 0xa3, 0x47, 0xd0, 0x20, 0x61, 0x5b, 0xa1, + 0x32, 0x6f, 0x6b, 0x59, 0x59, 0x85, 0x8b, 0x86, 0xa4, 0xf2, 0x30, 0xd9, 0x9b, 0x7f, 0xd7, 0x82, + 0x73, 0x19, 0x36, 0x84, 0x9c, 0x55, 0x6d, 0xbb, 0x51, 0xac, 0xd2, 0xfb, 0x68, 0xac, 0x8a, 0x97, + 0x63, 0x85, 0x41, 0xf7, 0x03, 0x67, 0x86, 0x69, 0x06, 0x28, 0x6c, 0x74, 0x04, 0xf4, 0x64, 0x0c, + 0x10, 0x5d, 0x86, 0xa1, 0x4e, 0x44, 0x42, 0x99, 0xf6, 0x58, 0xf2, 0x6f, 0xa6, 0x60, 0x64, 0x10, + 0x2a, 0x51, 0x6e, 0x2b, 0xdd, 0x9e, 0x26, 0x51, 0x72, 0xed, 0x1e, 0x87, 0xd9, 0x3f, 0x50, 0x84, + 0x8b, 0xb9, 0x96, 0xc1, 0xb4, 0x4b, 0x7b, 0x81, 0xef, 0xc6, 0x81, 0x7a, 0x17, 0xe2, 0xd1, 0x6d, + 0x48, 0x7b, 0x67, 0x4d, 0x94, 0x63, 0x85, 0x81, 0x9e, 0x95, 0x59, 0xf1, 0xd3, 0x09, 0x8c, 0x96, + 0xaa, 0x46, 0x62, 0xfc, 0x41, 0x33, 0x91, 0x3d, 0x03, 0x43, 0xed, 0x20, 0xf0, 0xd2, 0xcc, 0x88, + 0x76, 0x37, 0x08, 0x3c, 0xcc, 0x80, 0xe8, 0x33, 0x62, 0x1c, 0x52, 0x0f, 0x21, 0xd8, 0x69, 0x05, + 0x91, 0x36, 0x18, 0xcf, 0xc3, 0xe8, 0x2e, 0x39, 0x08, 0x5d, 0x7f, 0x3b, 0xfd, 0x40, 0x76, 0x93, + 0x17, 0x63, 0x09, 0x37, 0xf3, 0x77, 0x8c, 0x9e, 0x46, 0xfe, 0x0e, 0x7d, 0x66, 0xc7, 0xfa, 0x1e, + 0x6d, 0xdf, 0x57, 0x84, 0x69, 0xbc, 0x54, 0xfd, 0xe6, 0x44, 0xdc, 0xe9, 0x9e, 0x88, 0xd3, 0xce, + 0xea, 0xd6, 0x7f, 0x36, 0x7e, 0xce, 0x82, 0x69, 0x16, 0xe3, 0x5a, 0x44, 0x67, 0x71, 0x03, 0xff, + 0x0c, 0x44, 0xb7, 0x67, 0x60, 0x38, 0xa4, 0x8d, 0xa6, 0x53, 0x35, 0xb1, 0x9e, 0x60, 0x0e, 0x43, + 0x4f, 0xc2, 0x10, 0xeb, 0x02, 0x9d, 0xbc, 0x09, 0x9e, 0xe5, 0xa2, 0xea, 0xc4, 0x0e, 0x66, 0xa5, + 0xcc, 0x4d, 0x16, 0x93, 0xb6, 0xe7, 0xf2, 0x4e, 0x27, 0x0a, 0xf5, 0x4f, 0x86, 0x9b, 0x6c, 0x66, + 0xd7, 0x3e, 0x9a, 0x9b, 0x6c, 0x36, 0xc9, 0xde, 0xd7, 0xa2, 0xff, 0x51, 0x80, 0x4b, 0x99, 0xf5, + 0x06, 0x76, 0x93, 0xed, 0x5d, 0xfb, 0x74, 0xec, 0x1c, 0xb2, 0xcd, 0x0f, 0x8a, 0x67, 0x68, 0x7e, + 0x30, 0x34, 0xa8, 0xe4, 0x38, 0x3c, 0x80, 0xf7, 0x6a, 0xe6, 0x90, 0x7d, 0x42, 0xbc, 0x57, 0x33, + 0xfb, 0x96, 0x73, 0xad, 0xfb, 0xd3, 0x42, 0xce, 0xb7, 0xb0, 0x0b, 0xde, 0x15, 0xca, 0x67, 0x18, + 0x30, 0x12, 0x92, 0xf0, 0x04, 0xe7, 0x31, 0xbc, 0x0c, 0x2b, 0x28, 0x72, 0x35, 0x3f, 0x50, 0xde, + 0xb5, 0x37, 0x4f, 0xb4, 0x65, 0xe6, 0xcd, 0xf7, 0x0f, 0x3d, 0x94, 0x4c, 0xda, 0x27, 0x74, 0x4d, + 0xbb, 0x94, 0x17, 0x07, 0xbf, 0x94, 0x4f, 0x64, 0x5f, 0xc8, 0xd1, 0x22, 0x4c, 0xef, 0xb9, 0x3e, + 0x65, 0x9b, 0x07, 0xa6, 0x28, 0xaa, 0xc2, 0x22, 0xac, 0x99, 0x60, 0x9c, 0xc6, 0x9f, 0x7b, 0x13, + 0x26, 0x1f, 0x5e, 0x8b, 0xf8, 0x8d, 0x22, 0x3c, 0xd1, 0x63, 0xdb, 0x73, 0x5e, 0x6f, 0xcc, 0x81, + 0xc6, 0xeb, 0xbb, 0xe6, 0xa1, 0x0e, 0xe7, 0xb7, 0x3a, 0x9e, 0x77, 0xc0, 0x2c, 0xfc, 0x48, 0x4b, + 0x62, 0x08, 0x59, 0x51, 0x05, 0xb0, 0x5f, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xd1, 0xdb, 0x80, 0x02, + 0x91, 0xb2, 0x36, 0x09, 0x90, 0xc3, 0x06, 0xbe, 0x98, 0x6c, 0xc6, 0xdb, 
0x5d, 0x18, 0x38, 0xa3, + 0x16, 0x15, 0xfa, 0xe9, 0xa9, 0x74, 0xa0, 0xba, 0x95, 0x12, 0xfa, 0xb1, 0x0e, 0xc4, 0x26, 0x2e, + 0xba, 0x0e, 0xb3, 0xce, 0xbe, 0xe3, 0xf2, 0x80, 0x89, 0x92, 0x00, 0x97, 0xfa, 0x95, 0xee, 0x6e, + 0x31, 0x8d, 0x80, 0xbb, 0xeb, 0xa4, 0x1c, 0x51, 0x47, 0xf2, 0x1d, 0x51, 0x7b, 0xf3, 0xc5, 0x7e, + 0xaa, 0x58, 0xfb, 0x3f, 0x5b, 0xf4, 0xf8, 0xca, 0x48, 0x3b, 0x4f, 0xc7, 0x41, 0xa9, 0x14, 0x35, + 0x9f, 0x50, 0x35, 0x0e, 0xcb, 0x3a, 0x10, 0x9b, 0xb8, 0x7c, 0x41, 0x44, 0x89, 0xa3, 0x82, 0x21, + 0xba, 0x0b, 0xa7, 0x6f, 0x85, 0x81, 0xbe, 0x0c, 0xa3, 0x2d, 0x77, 0xdf, 0x8d, 0x82, 0x50, 0x6c, + 0x96, 0x13, 0x1a, 0x93, 0x27, 0x7c, 0xb0, 0xca, 0xc9, 0x60, 0x49, 0xcf, 0xfe, 0xbe, 0x02, 0x4c, + 0xca, 0x16, 0xdf, 0xe9, 0x04, 0xb1, 0x73, 0x06, 0xc7, 0xf2, 0x75, 0xe3, 0x58, 0xfe, 0x4c, 0x2f, + 0xcf, 0x77, 0xd6, 0xa5, 0xdc, 0xe3, 0xf8, 0x76, 0xea, 0x38, 0x7e, 0xae, 0x3f, 0xa9, 0xde, 0xc7, + 0xf0, 0xbf, 0xb0, 0x60, 0xd6, 0xc0, 0x3f, 0x83, 0xd3, 0x60, 0xd5, 0x3c, 0x0d, 0x9e, 0xee, 0xfb, + 0x0d, 0x39, 0xa7, 0xc0, 0xd7, 0x0b, 0xa9, 0xbe, 0x33, 0xee, 0xff, 0x01, 0x0c, 0xed, 0x38, 0x61, + 0xab, 0x57, 0xd8, 0xdf, 0xae, 0x4a, 0xf3, 0x37, 0x9c, 0xb0, 0xc5, 0x79, 0xf8, 0x4b, 0x2a, 0xf7, + 0xa8, 0x13, 0xb6, 0xfa, 0xfa, 0xe5, 0xb0, 0xa6, 0xd0, 0x35, 0x18, 0x89, 0x9a, 0x41, 0x5b, 0xd9, + 0xe4, 0x5d, 0xe6, 0x79, 0x49, 0x69, 0xc9, 0xf1, 0x61, 0x05, 0x99, 0xcd, 0xd1, 0x62, 0x2c, 0xf0, + 0xe7, 0xb6, 0xa1, 0xa4, 0x9a, 0x7e, 0xa4, 0x1e, 0x15, 0xbf, 0x55, 0x84, 0x73, 0x19, 0xeb, 0x02, + 0x45, 0xc6, 0x68, 0xbd, 0x32, 0xe0, 0x72, 0xfa, 0x88, 0xe3, 0x15, 0xb1, 0x1b, 0x4b, 0x4b, 0xcc, + 0xff, 0xc0, 0x8d, 0xde, 0x89, 0x48, 0xba, 0x51, 0x5a, 0xd4, 0xbf, 0x51, 0xda, 0xd8, 0x99, 0x0d, + 0x35, 0x6d, 0x48, 0xf5, 0xf4, 0x91, 0xce, 0xe9, 0x1f, 0x15, 0xe1, 0x7c, 0x56, 0xc0, 0x0c, 0xf4, + 0x9d, 0xa9, 0x24, 0x42, 0xaf, 0x0d, 0x1a, 0x6a, 0x83, 0x67, 0x16, 0x12, 0x11, 0xc6, 0xe6, 0xcd, + 0xb4, 0x42, 0x7d, 0x87, 0x59, 0xb4, 0xc9, 0x1c, 0xe5, 0x42, 0x9e, 0xfc, 0x49, 0x6e, 0xf1, 0xcf, + 0x0d, 0xdc, 0x01, 0x91, 0x35, 0x2a, 0x4a, 0x39, 0xca, 0xc9, 0xe2, 0xfe, 0x8e, 0x72, 0xb2, 0xe5, + 0x39, 0x17, 0xc6, 0xb5, 0xaf, 0x79, 0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x14, 0xad, 0xdf, 0x8f, 0x74, + 0xd6, 0x7f, 0xc4, 0x82, 0x94, 0x25, 0x9c, 0x52, 0x49, 0x59, 0xb9, 0x2a, 0xa9, 0xcb, 0x30, 0x14, + 0x06, 0x1e, 0x49, 0xe7, 0x95, 0xc1, 0x81, 0x47, 0x30, 0x83, 0x50, 0x8c, 0x38, 0x51, 0x48, 0x4c, + 0xe8, 0x97, 0x2d, 0x71, 0x8d, 0x7a, 0x06, 0x86, 0x3d, 0xb2, 0x4f, 0xa4, 0x36, 0x42, 0xf1, 0xe4, + 0x5b, 0xb4, 0x10, 0x73, 0x98, 0xfd, 0x73, 0x43, 0xf0, 0x54, 0x4f, 0x57, 0x53, 0x7a, 0x65, 0xd9, + 0x76, 0x62, 0x72, 0xdf, 0x39, 0x48, 0x47, 0xbd, 0xbe, 0xce, 0x8b, 0xb1, 0x84, 0x33, 0xbb, 0x5d, + 0x1e, 0x38, 0x33, 0xa5, 0xc0, 0x13, 0xf1, 0x32, 0x05, 0xd4, 0x54, 0x1c, 0x15, 0x4f, 0x43, 0x71, + 0x74, 0x15, 0x20, 0x8a, 0xbc, 0x15, 0x9f, 0x4a, 0x60, 0x2d, 0x61, 0x10, 0x9c, 0x04, 0x58, 0x6d, + 0xdc, 0x12, 0x10, 0xac, 0x61, 0xa1, 0x2a, 0xcc, 0xb4, 0xc3, 0x20, 0xe6, 0xfa, 0xd0, 0x2a, 0x37, + 0x45, 0x19, 0x36, 0xbd, 0xfc, 0xea, 0x29, 0x38, 0xee, 0xaa, 0x81, 0x5e, 0x87, 0x71, 0xe1, 0xf9, + 0x57, 0x0f, 0x02, 0x4f, 0xa8, 0x6a, 0x94, 0x61, 0x43, 0x23, 0x01, 0x61, 0x1d, 0x4f, 0xab, 0xc6, + 0x94, 0xac, 0xa3, 0x99, 0xd5, 0xb8, 0xa2, 0x55, 0xc3, 0x4b, 0x05, 0xcf, 0x19, 0x1b, 0x28, 0x78, + 0x4e, 0xa2, 0xbc, 0x2a, 0x0d, 0xfc, 0xae, 0x04, 0x7d, 0xd5, 0x3d, 0x3f, 0x3d, 0x04, 0xe7, 0xc4, + 0xc2, 0x79, 0xd4, 0xcb, 0xe5, 0x4e, 0xf7, 0x72, 0x39, 0x0d, 0xf5, 0xd6, 0x37, 0xd7, 0xcc, 0x59, + 0xaf, 0x99, 0x5f, 0x2c, 0xc2, 0x08, 0x9f, 0x8a, 0x33, 0x90, 0xe1, 0x57, 0x85, 0xd2, 0xaf, 0x47, + 
0xd8, 0x18, 0xde, 0x97, 0xf9, 0xaa, 0x13, 0x3b, 0xfc, 0xfc, 0x52, 0x6c, 0x34, 0x51, 0x0f, 0xa2, + 0x79, 0x83, 0xd1, 0xce, 0xa5, 0xb4, 0x5a, 0xc0, 0x69, 0x68, 0x6c, 0xf7, 0x2b, 0x00, 0x11, 0x4b, + 0x9c, 0x4f, 0x69, 0x88, 0x00, 0x44, 0x2f, 0xf4, 0x68, 0xbd, 0xa1, 0x90, 0x79, 0x1f, 0x92, 0x25, + 0xa8, 0x00, 0x58, 0xa3, 0x38, 0xf7, 0x06, 0x94, 0x14, 0x72, 0x3f, 0x15, 0xc0, 0x84, 0x7e, 0xea, + 0x7d, 0x11, 0xa6, 0x53, 0x6d, 0x9d, 0x48, 0x83, 0xf0, 0xf3, 0x16, 0x4c, 0xf3, 0x2e, 0xaf, 0xf8, + 0xfb, 0x62, 0xb3, 0x7f, 0x08, 0xe7, 0xbd, 0x8c, 0x4d, 0x27, 0x66, 0x74, 0xf0, 0x4d, 0xaa, 0x34, + 0x06, 0x59, 0x50, 0x9c, 0xd9, 0x06, 0xba, 0x02, 0x63, 0xdc, 0xd1, 0xc5, 0xf1, 0x84, 0x73, 0xc2, + 0x04, 0x4f, 0x44, 0xc1, 0xcb, 0xb0, 0x82, 0xda, 0xbf, 0x6d, 0xc1, 0x2c, 0xef, 0xf9, 0x4d, 0x72, + 0xa0, 0x6e, 0xc7, 0x1f, 0x67, 0xdf, 0x45, 0x9e, 0x8d, 0x42, 0x4e, 0x9e, 0x0d, 0xfd, 0xd3, 0x8a, + 0x3d, 0x3f, 0xed, 0xa7, 0x2c, 0x10, 0x2b, 0xf0, 0x0c, 0xee, 0x81, 0xdf, 0x6a, 0xde, 0x03, 0xe7, + 0xf2, 0x17, 0x75, 0xce, 0x05, 0xf0, 0x4f, 0x2c, 0x98, 0xe1, 0x08, 0xc9, 0x43, 0xe4, 0xc7, 0x3a, + 0x0f, 0x83, 0x24, 0x7f, 0x53, 0xd9, 0xb6, 0xb3, 0x3f, 0xca, 0x98, 0xac, 0xa1, 0x9e, 0x93, 0xd5, + 0x92, 0x1b, 0xe8, 0x04, 0x49, 0x0d, 0x4f, 0x1c, 0x1a, 0xd6, 0xfe, 0x43, 0x0b, 0x10, 0x6f, 0xc6, + 0x38, 0x97, 0xe9, 0x69, 0xc7, 0x4a, 0x35, 0x4d, 0x50, 0xc2, 0x6a, 0x14, 0x04, 0x6b, 0x58, 0xa7, + 0x32, 0x3c, 0xa9, 0xd7, 0xe4, 0x62, 0xff, 0xd7, 0xe4, 0x13, 0x8c, 0xe8, 0x5f, 0x1f, 0x82, 0xb4, + 0x25, 0x34, 0xba, 0x0b, 0x13, 0x4d, 0xa7, 0xed, 0x6c, 0xba, 0x9e, 0x1b, 0xbb, 0x24, 0xea, 0x65, + 0x86, 0xb2, 0xac, 0xe1, 0x89, 0x77, 0x42, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x3c, 0x40, 0x3b, 0x74, + 0xf7, 0x5d, 0x8f, 0x6c, 0xb3, 0xab, 0x30, 0x73, 0x87, 0xe2, 0xb6, 0x15, 0xb2, 0x14, 0x6b, 0x18, + 0x19, 0xee, 0x33, 0xc5, 0x47, 0xe7, 0x3e, 0x33, 0x74, 0x42, 0xf7, 0x99, 0xe1, 0x81, 0xdc, 0x67, + 0x30, 0x3c, 0x26, 0xcf, 0x6e, 0xfa, 0x7f, 0xd5, 0xf5, 0x88, 0x10, 0xd8, 0xb8, 0x93, 0xd4, 0xdc, + 0xd1, 0x61, 0xe5, 0x31, 0x9c, 0x89, 0x81, 0x73, 0x6a, 0xa2, 0x2f, 0x41, 0xd9, 0xf1, 0xbc, 0xe0, + 0xbe, 0x1a, 0xb5, 0x95, 0xa8, 0xe9, 0x78, 0x49, 0xa4, 0xf4, 0xb1, 0xa5, 0x27, 0x8f, 0x0e, 0x2b, + 0xe5, 0xc5, 0x1c, 0x1c, 0x9c, 0x5b, 0xdb, 0xde, 0x85, 0x73, 0x0d, 0x12, 0xca, 0x3c, 0xa9, 0x6a, + 0x8b, 0x6d, 0x40, 0x29, 0x4c, 0x31, 0x95, 0x81, 0x62, 0x95, 0x68, 0x51, 0x2c, 0x25, 0x13, 0x49, + 0x08, 0xd9, 0x7f, 0x6c, 0xc1, 0xa8, 0xb0, 0xae, 0x3e, 0x03, 0x59, 0x66, 0xd1, 0xd0, 0x47, 0x56, + 0xb2, 0x19, 0x2f, 0xeb, 0x4c, 0xae, 0x26, 0xb2, 0x96, 0xd2, 0x44, 0x3e, 0xdd, 0x8b, 0x48, 0x6f, + 0x1d, 0xe4, 0x0f, 0x15, 0x61, 0xca, 0xb4, 0x2c, 0x3f, 0x83, 0x21, 0x58, 0x87, 0xd1, 0x48, 0xb8, + 0x31, 0x14, 0xf2, 0xed, 0x57, 0xd3, 0x93, 0x98, 0x58, 0xb9, 0x08, 0xc7, 0x05, 0x49, 0x24, 0xd3, + 0x3f, 0xa2, 0xf8, 0x08, 0xfd, 0x23, 0xfa, 0x19, 0xf7, 0x0f, 0x9d, 0x86, 0x71, 0xbf, 0xfd, 0x4b, + 0x8c, 0xf9, 0xeb, 0xe5, 0x67, 0x20, 0x17, 0x5c, 0x37, 0x8f, 0x09, 0xbb, 0xc7, 0xca, 0x12, 0x9d, + 0xca, 0x91, 0x0f, 0xfe, 0xb1, 0x05, 0xe3, 0x02, 0xf1, 0x0c, 0xba, 0xfd, 0x6d, 0x66, 0xb7, 0x9f, + 0xe8, 0xd1, 0xed, 0x9c, 0xfe, 0xfe, 0xdd, 0x82, 0xea, 0x6f, 0x3d, 0x08, 0xe3, 0x81, 0x32, 0x67, + 0x8c, 0xd1, 0xdb, 0x60, 0xd0, 0x0c, 0x3c, 0x71, 0x98, 0x3f, 0x99, 0xf8, 0xc9, 0xf2, 0xf2, 0x63, + 0xed, 0x37, 0x56, 0xd8, 0xcc, 0x8d, 0x33, 0x08, 0x63, 0x71, 0x80, 0x26, 0x6e, 0x9c, 0x41, 0x18, + 0x63, 0x06, 0x41, 0x2d, 0x80, 0xd8, 0x09, 0xb7, 0x49, 0x4c, 0xcb, 0x84, 0xcb, 0x7d, 0xfe, 0x2e, + 0xec, 0xc4, 0xae, 0x37, 0xef, 0xfa, 0x71, 0x14, 0x87, 0xf3, 0x35, 0x3f, 0xbe, 0x1d, 0xf2, 0xbb, + 0x81, 0xe6, 0xf8, 0xaa, 
0x68, 0x61, 0x8d, 0xae, 0xf4, 0xbc, 0x62, 0x6d, 0x0c, 0x9b, 0x0f, 0x85, + 0xeb, 0xa2, 0x1c, 0x2b, 0x0c, 0xfb, 0x0d, 0xc6, 0x93, 0xd9, 0x00, 0x9d, 0xcc, 0x27, 0xf5, 0x37, + 0xc6, 0xd4, 0xd0, 0xb2, 0x57, 0x82, 0xaa, 0xee, 0xf9, 0xda, 0x9b, 0x05, 0xd2, 0x86, 0x75, 0xb7, + 0x80, 0xc4, 0x3d, 0x16, 0x7d, 0x7b, 0xd7, 0xfb, 0xf1, 0xcb, 0x7d, 0x78, 0xe9, 0x09, 0x5e, 0x8c, + 0x59, 0xf8, 0x55, 0x16, 0xa6, 0xb2, 0x56, 0x4f, 0xe7, 0x36, 0x59, 0x96, 0x00, 0x9c, 0xe0, 0xa0, + 0x05, 0x71, 0xb3, 0xe4, 0xfa, 0xb9, 0x27, 0x52, 0x37, 0x4b, 0xf9, 0xf9, 0xda, 0xd5, 0xf2, 0x15, + 0x18, 0x57, 0xf9, 0xe2, 0xea, 0x3c, 0xed, 0x96, 0x08, 0x40, 0xb0, 0x92, 0x14, 0x63, 0x1d, 0x07, + 0x6d, 0xc0, 0x74, 0xc4, 0x93, 0xd9, 0x49, 0x67, 0x28, 0xa1, 0x37, 0x78, 0x41, 0xbe, 0x3b, 0x37, + 0x4c, 0xf0, 0x31, 0x2b, 0xe2, 0x9b, 0x55, 0xba, 0x4f, 0xa5, 0x49, 0xa0, 0xb7, 0x60, 0xca, 0xd3, + 0x93, 0x7a, 0xd7, 0x85, 0x5a, 0x41, 0x99, 0x65, 0x1a, 0x29, 0xbf, 0xeb, 0x38, 0x85, 0x4d, 0x85, + 0x00, 0xbd, 0x44, 0x44, 0x2f, 0x73, 0xfc, 0x6d, 0x12, 0x89, 0x6c, 0x57, 0x4c, 0x08, 0xb8, 0x95, + 0x83, 0x83, 0x73, 0x6b, 0xa3, 0x6b, 0x30, 0x21, 0x3f, 0x5f, 0x73, 0x0e, 0x4c, 0x8c, 0x7f, 0x35, + 0x18, 0x36, 0x30, 0xd1, 0x7d, 0xb8, 0x20, 0xff, 0x6f, 0x84, 0xce, 0xd6, 0x96, 0xdb, 0x14, 0xbe, + 0x99, 0xe3, 0x8c, 0xc4, 0xa2, 0xf4, 0x84, 0x58, 0xc9, 0x42, 0x3a, 0x3e, 0xac, 0x5c, 0x16, 0xa3, + 0x96, 0x09, 0x67, 0x93, 0x98, 0x4d, 0x1f, 0xad, 0xc1, 0xb9, 0x1d, 0xe2, 0x78, 0xf1, 0xce, 0xf2, + 0x0e, 0x69, 0xee, 0xca, 0x4d, 0xc4, 0x5c, 0x0e, 0x35, 0x93, 0xd9, 0x1b, 0xdd, 0x28, 0x38, 0xab, + 0x1e, 0x7a, 0x0f, 0xca, 0xed, 0xce, 0xa6, 0xe7, 0x46, 0x3b, 0xeb, 0x41, 0xcc, 0x9e, 0xba, 0x55, + 0xba, 0x35, 0xe1, 0x9b, 0xa8, 0xdc, 0x2d, 0xeb, 0x39, 0x78, 0x38, 0x97, 0x02, 0xfa, 0x10, 0x2e, + 0xa4, 0x16, 0x83, 0xf0, 0x94, 0x9a, 0xca, 0x8f, 0x05, 0xd9, 0xc8, 0xaa, 0xc0, 0x3d, 0x66, 0x33, + 0x41, 0x38, 0xbb, 0x89, 0x8f, 0x66, 0x00, 0xf1, 0x01, 0xad, 0xac, 0x49, 0x37, 0xe8, 0xab, 0x30, + 0xa1, 0xaf, 0x22, 0x71, 0xc0, 0x3c, 0xdb, 0x2f, 0x81, 0xbd, 0x90, 0x8d, 0xd4, 0x8a, 0xd2, 0x61, + 0xd8, 0xa0, 0x68, 0x13, 0xc8, 0xfe, 0x3e, 0x74, 0x0b, 0xc6, 0x9a, 0x9e, 0x4b, 0xfc, 0xb8, 0x56, + 0xef, 0xe5, 0x53, 0xbf, 0x2c, 0x70, 0xc4, 0x80, 0x89, 0xe0, 0x79, 0xbc, 0x0c, 0x2b, 0x0a, 0xf6, + 0xaf, 0x14, 0xa0, 0xd2, 0x27, 0x12, 0x63, 0x4a, 0x07, 0x68, 0x0d, 0xa4, 0x03, 0x5c, 0x94, 0xc9, + 0xe3, 0xd6, 0x53, 0xf7, 0xcf, 0x54, 0x62, 0xb8, 0xe4, 0x16, 0x9a, 0xc6, 0x1f, 0xd8, 0x6e, 0x52, + 0x57, 0x23, 0x0e, 0xf5, 0xb5, 0xe8, 0x35, 0x9e, 0x0f, 0x86, 0x07, 0x97, 0xe8, 0x73, 0x55, 0xc1, + 0xf6, 0x2f, 0x15, 0xe0, 0x82, 0x1a, 0xc2, 0x3f, 0xbf, 0x03, 0x77, 0xa7, 0x7b, 0xe0, 0x4e, 0x41, + 0x91, 0x6e, 0xdf, 0x86, 0x91, 0xc6, 0x41, 0xd4, 0x8c, 0xbd, 0x01, 0x04, 0xa0, 0x67, 0xcc, 0xd8, + 0x32, 0xea, 0x98, 0x36, 0xe2, 0xcb, 0xfc, 0x15, 0x0b, 0xa6, 0x37, 0x96, 0xeb, 0x8d, 0xa0, 0xb9, + 0x4b, 0xe2, 0x45, 0xae, 0x26, 0xc2, 0x42, 0xfe, 0xb1, 0x1e, 0x52, 0xae, 0xc9, 0x92, 0x98, 0x2e, + 0xc3, 0xd0, 0x4e, 0x10, 0xc5, 0xe9, 0x57, 0xb6, 0x1b, 0x41, 0x14, 0x63, 0x06, 0xb1, 0x7f, 0xc7, + 0x82, 0x61, 0x96, 0xf2, 0xb4, 0x5f, 0x6a, 0xdc, 0x41, 0xbe, 0x0b, 0xbd, 0x0e, 0x23, 0x64, 0x6b, + 0x8b, 0x34, 0x63, 0x31, 0xab, 0xd2, 0xbb, 0x6e, 0x64, 0x85, 0x95, 0xd2, 0x43, 0x9f, 0x35, 0xc6, + 0xff, 0x62, 0x81, 0x8c, 0xee, 0x41, 0x29, 0x76, 0xf7, 0xc8, 0x62, 0xab, 0x25, 0xde, 0x29, 0x1e, + 0xc2, 0x99, 0x71, 0x43, 0x12, 0xc0, 0x09, 0x2d, 0xfb, 0x07, 0x0a, 0x00, 0x89, 0x43, 0x6f, 0xbf, + 0x4f, 0x5c, 0xea, 0xca, 0xfe, 0xfb, 0x6c, 0x46, 0xf6, 0x5f, 0x94, 0x10, 0xcc, 0xc8, 0xfd, 0xab, + 0x86, 0xa9, 0x38, 0xd0, 0x30, 0x0d, 0x9d, 0x64, 
0x98, 0x96, 0x61, 0x36, 0x71, 0x48, 0x36, 0xa3, + 0x33, 0xb0, 0x78, 0xec, 0x1b, 0x69, 0x20, 0xee, 0xc6, 0xb7, 0xbf, 0xd7, 0x02, 0xe1, 0x6e, 0x30, + 0xc0, 0x62, 0x7e, 0x57, 0x26, 0xea, 0x34, 0x02, 0xba, 0x5e, 0xce, 0xf7, 0xbf, 0x10, 0x61, 0x5c, + 0xd5, 0xe1, 0x61, 0x04, 0x6f, 0x35, 0x68, 0xd9, 0x2d, 0x10, 0xd0, 0x2a, 0x61, 0x4a, 0x86, 0xfe, + 0xbd, 0xb9, 0x0a, 0xd0, 0x62, 0xb8, 0x5a, 0xe2, 0x3f, 0xc5, 0xaa, 0xaa, 0x0a, 0x82, 0x35, 0x2c, + 0xfb, 0x6f, 0x16, 0x60, 0x5c, 0x06, 0x10, 0xa5, 0xf7, 0xf8, 0xfe, 0xad, 0x9c, 0x28, 0x67, 0x00, + 0xcb, 0x94, 0x49, 0x09, 0xab, 0xd0, 0xf2, 0x7a, 0xa6, 0x4c, 0x09, 0xc0, 0x09, 0x0e, 0x7a, 0x1e, + 0x46, 0xa3, 0xce, 0x26, 0x43, 0x4f, 0x19, 0xd1, 0x37, 0x78, 0x31, 0x96, 0x70, 0xf4, 0x25, 0x98, + 0xe1, 0xf5, 0xc2, 0xa0, 0xed, 0x6c, 0x73, 0x0d, 0xd2, 0xb0, 0xf2, 0x6a, 0x9b, 0x59, 0x4b, 0xc1, + 0x8e, 0x0f, 0x2b, 0xe7, 0xd3, 0x65, 0x4c, 0xf7, 0xd8, 0x45, 0x85, 0xee, 0x8b, 0x99, 0xb4, 0xc3, + 0x0c, 0xba, 0x01, 0x23, 0x9c, 0xe5, 0x09, 0x16, 0xd4, 0xe3, 0x45, 0x49, 0x73, 0xb3, 0x61, 0x41, + 0xd4, 0x05, 0xd7, 0x14, 0xf5, 0xd1, 0x7b, 0x30, 0xde, 0x0a, 0xee, 0xfb, 0xf7, 0x9d, 0xb0, 0xb5, + 0x58, 0xaf, 0x89, 0x55, 0x93, 0x29, 0x39, 0x55, 0x13, 0x34, 0xdd, 0x75, 0x87, 0x69, 0x4f, 0x13, + 0x10, 0xd6, 0xc9, 0xa1, 0x0d, 0x16, 0xe3, 0x89, 0xa7, 0xb2, 0xef, 0x65, 0x75, 0xa6, 0xb2, 0xdf, + 0x6b, 0x94, 0x27, 0x45, 0x20, 0x28, 0x91, 0x08, 0x3f, 0x21, 0x64, 0x7f, 0xed, 0x1c, 0x18, 0xab, + 0xd5, 0xc8, 0x19, 0x60, 0x9d, 0x52, 0xce, 0x00, 0x0c, 0x63, 0x64, 0xaf, 0x1d, 0x1f, 0x54, 0xdd, + 0xb0, 0x57, 0xd2, 0x99, 0x15, 0x81, 0xd3, 0x4d, 0x53, 0x42, 0xb0, 0xa2, 0x93, 0x9d, 0xd8, 0xa1, + 0xf8, 0x31, 0x26, 0x76, 0x18, 0x3a, 0xc3, 0xc4, 0x0e, 0xeb, 0x30, 0xba, 0xed, 0xc6, 0x98, 0xb4, + 0x03, 0x71, 0xdc, 0x67, 0xae, 0x84, 0xeb, 0x1c, 0xa5, 0x3b, 0xc0, 0xb8, 0x00, 0x60, 0x49, 0x04, + 0xbd, 0xad, 0xf6, 0xc0, 0x48, 0xbe, 0xb4, 0xdc, 0xfd, 0xf8, 0x90, 0xb9, 0x0b, 0x44, 0x22, 0x87, + 0xd1, 0x87, 0x4d, 0xe4, 0xb0, 0x2a, 0xd3, 0x2f, 0x8c, 0xe5, 0x1b, 0x69, 0xb2, 0xec, 0x0a, 0x7d, + 0x92, 0x2e, 0x18, 0x89, 0x2a, 0x4a, 0xa7, 0x97, 0xa8, 0xe2, 0x7b, 0x2d, 0xb8, 0xd0, 0xce, 0xca, + 0xd9, 0x22, 0xd2, 0x27, 0xbc, 0x3e, 0x70, 0x52, 0x1a, 0xa3, 0x41, 0x76, 0x6d, 0xca, 0x44, 0xc3, + 0xd9, 0xcd, 0xd1, 0x81, 0x0e, 0x37, 0x5b, 0x22, 0xe7, 0xc2, 0x33, 0x39, 0x19, 0x2f, 0x7a, 0xe4, + 0xb9, 0x78, 0x34, 0x79, 0x16, 0x92, 0x5c, 0x17, 0x93, 0x1f, 0x39, 0xd7, 0xc5, 0xdb, 0x2a, 0xd7, + 0x45, 0x8f, 0x48, 0x3a, 0x3c, 0x93, 0x45, 0xdf, 0x0c, 0x17, 0x5a, 0x96, 0x8a, 0xe9, 0xd3, 0xc8, + 0x52, 0xf1, 0x15, 0x93, 0xd9, 0xf3, 0x94, 0x09, 0x2f, 0xf6, 0x61, 0xf6, 0x06, 0xdd, 0xde, 0xec, + 0x9e, 0x67, 0xe4, 0x98, 0x7d, 0xa8, 0x8c, 0x1c, 0x77, 0xf5, 0x5c, 0x17, 0xa8, 0x4f, 0x32, 0x07, + 0x8a, 0x34, 0x60, 0x86, 0x8b, 0xbb, 0xfa, 0x11, 0x74, 0x2e, 0x9f, 0xae, 0x3a, 0x69, 0xba, 0xe9, + 0x66, 0x1d, 0x42, 0xdd, 0x99, 0x33, 0xce, 0x9f, 0x4d, 0xe6, 0x8c, 0x0b, 0xa7, 0x9e, 0x39, 0xe3, + 0xb1, 0x33, 0xc8, 0x9c, 0xf1, 0xf8, 0xc7, 0x9a, 0x39, 0xa3, 0xfc, 0x08, 0x32, 0x67, 0xac, 0x27, + 0x99, 0x33, 0x2e, 0xe6, 0x4f, 0x49, 0x86, 0x55, 0x5a, 0x4e, 0xbe, 0x8c, 0xbb, 0x50, 0x6a, 0x4b, + 0x9f, 0x6a, 0x11, 0xea, 0x27, 0x3b, 0x51, 0x5f, 0x96, 0xe3, 0x35, 0x9f, 0x12, 0x05, 0xc2, 0x09, + 0x29, 0x4a, 0x37, 0xc9, 0x9f, 0xf1, 0x44, 0x0f, 0xc5, 0x58, 0x96, 0xca, 0x21, 0x3f, 0x6b, 0x86, + 0xfd, 0x57, 0x0b, 0x70, 0xa9, 0xf7, 0xba, 0x4e, 0xf4, 0x15, 0xf5, 0x44, 0xbf, 0x9e, 0xd2, 0x57, + 0xf0, 0x4b, 0x40, 0x82, 0x35, 0x70, 0xe0, 0x89, 0xeb, 0x30, 0xab, 0xcc, 0xd1, 0x3c, 0xb7, 0x79, + 0xa0, 0x25, 0xf0, 0x53, 0xae, 0x31, 0x8d, 0x34, 0x02, 0xee, 0xae, 0x83, 
0x16, 0x61, 0xda, 0x28, + 0xac, 0x55, 0x85, 0xb0, 0xaf, 0x14, 0x24, 0x0d, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0xd7, 0x2d, 0x78, + 0x3c, 0x27, 0x64, 0xf5, 0xc0, 0x71, 0x15, 0xb6, 0x60, 0xba, 0x6d, 0x56, 0xed, 0x13, 0x7e, 0xc5, + 0x08, 0x8c, 0xad, 0xfa, 0x9a, 0x02, 0xe0, 0x34, 0xd1, 0xa5, 0x2b, 0xbf, 0xf6, 0x7b, 0x97, 0x3e, + 0xf5, 0x9b, 0xbf, 0x77, 0xe9, 0x53, 0xbf, 0xfd, 0x7b, 0x97, 0x3e, 0xf5, 0x17, 0x8f, 0x2e, 0x59, + 0xbf, 0x76, 0x74, 0xc9, 0xfa, 0xcd, 0xa3, 0x4b, 0xd6, 0x6f, 0x1f, 0x5d, 0xb2, 0x7e, 0xf7, 0xe8, + 0x92, 0xf5, 0x03, 0xbf, 0x7f, 0xe9, 0x53, 0xef, 0x16, 0xf6, 0x5f, 0xf9, 0xff, 0x01, 0x00, 0x00, + 0xff, 0xff, 0x99, 0x05, 0xfa, 0x71, 0xfe, 0xdd, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index d3653ac6..096d5240 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -32,7 +32,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; option go_package = "v1"; // Represents a Persistent Disk resource in AWS. -// +// // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk // can only be mounted as read/write once. AWS EBS volumes support @@ -170,6 +170,23 @@ message Binding { optional ObjectReference target = 2; } +// Represents storage that is managed by an external CSI volume driver +message CSIPersistentVolumeSource { + // Driver is the name of the driver to use for this volume. + // Required. + optional string driver = 1; + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + optional string volumeHandle = 2; + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + optional bool readOnly = 3; +} + // Adds and removes POSIX capabilities from running containers. message Capabilities { // Added capabilities @@ -340,7 +357,7 @@ message ConfigMap { // ConfigMapEnvSource selects a ConfigMap to populate the environment // variables with. -// +// // The contents of the target ConfigMap's Data field will represent the // key-value pairs as environment variables. message ConfigMapEnvSource { @@ -376,7 +393,7 @@ message ConfigMapList { } // Adapts a ConfigMap into a projected volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, // unless the items element is populated with specific mappings of keys to paths. @@ -401,7 +418,7 @@ message ConfigMapProjection { } // Adapts a ConfigMap into a volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless // the items element is populated with specific mappings of keys to paths. @@ -516,6 +533,13 @@ message Container { // +patchStrategy=merge repeated VolumeMount volumeMounts = 9; + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. 
@@ -765,6 +789,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -1001,7 +1029,6 @@ message EnvVarSource { } // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. message Event { // Standard object's metadata. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -1040,6 +1067,30 @@ message Event { // Type of this event (Normal, Warning), new types could be added in the future // +optional optional string type = 9; + + // Time when this Event was first observed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 11; + + // What action was taken/failed regarding to the Regarding object. + // +optional + optional string action = 12; + + // Optional secondary object for more complex actions. + // +optional + optional ObjectReference related = 13; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingComponent = 14; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 15; } // EventList is a list of events. @@ -1053,6 +1104,19 @@ message EventList { repeated Event items = 2; } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time of the last occurence observed + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // State of this Series: Ongoing or Finished + optional string state = 3; +} + // EventSource contains information for an event. message EventSource { // Component from which the event is generated. @@ -1150,7 +1214,7 @@ message FlockerVolumeSource { } // Represents a Persistent Disk resource in Google Compute Engine. -// +// // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD // can only be mounted as read/write once or read-only many times. GCE @@ -1299,21 +1363,22 @@ message HostPathVolumeSource { optional string type = 2; } -// Represents an ISCSI disk. +// ISCSIPersistentVolumeSource represents an ISCSI disk. // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port +message ISCSIPersistentVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). optional string targetPortal = 1; // Target iSCSI Qualified Name. optional string iqn = 2; - // iSCSI target lun number. 
+ // iSCSI Target Lun number. optional int32 lun = 3; - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional optional string iscsiInterface = 4; @@ -1330,7 +1395,7 @@ message ISCSIVolumeSource { // +optional optional bool readOnly = 6; - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). // +optional repeated string portals = 7; @@ -1343,11 +1408,67 @@ message ISCSIVolumeSource { // +optional optional bool chapAuthSession = 11; - // CHAP secret for iSCSI target and initiator authentication + // CHAP Secret for iSCSI target and initiator authentication + // +optional + optional SecretReference secretRef = 10; + + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + optional string initiatorName = 12; +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI Target Lun number. + optional int32 lun = 3; + + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP Secret for iSCSI target and initiator authentication // +optional optional LocalObjectReference secretRef = 10; - // Custom iSCSI initiator name. + // Custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. // +optional @@ -1907,12 +2028,12 @@ message ObjectMeta { // The provided value has the same validation rules as the Name field, // and may be truncated by the length of the suffix required to make the value // unique on the server. 
- // + // // If this field is specified and the generated name exists, the server will // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). - // + // // Applied only if Name is not specified. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional @@ -1922,7 +2043,7 @@ message ObjectMeta { // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. - // + // // Must be a DNS_LABEL. // Cannot be updated. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ @@ -1938,7 +2059,7 @@ message ObjectMeta { // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. - // + // // Populated by the system. // Read-only. // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids @@ -1950,7 +2071,7 @@ message ObjectMeta { // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and passed unmodified back to the server. // They may only be valid for a particular resource or set of resources. - // + // // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . @@ -1966,7 +2087,7 @@ message ObjectMeta { // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // + // // Populated by the system. // Read-only. // Null for lists. @@ -1987,7 +2108,7 @@ message ObjectMeta { // timestamp, until an administrator or automated process can determine the resource is // fully terminated. // If not set, graceful deletion of the object has not been requested. - // + // // Populated by the system when a graceful deletion is requested. // Read-only. // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata @@ -2029,7 +2150,7 @@ message ObjectMeta { // this object has been completely initialized. Otherwise, the object is considered uninitialized // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to // observe uninitialized objects. - // + // // When an object is created, the system will populate this list with the current set of initializers. // Only privileged users may set or modify this list. Once it is empty, it may not be modified further // by any user. @@ -2198,6 +2319,12 @@ message PersistentVolumeClaimSpec { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional optional string storageClassName = 5; + + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 6; } // PersistentVolumeClaimStatus is the current status of a persistent volume claim. 
@@ -2292,7 +2419,7 @@ message PersistentVolumeSource { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - optional ISCSIVolumeSource iscsi = 7; + optional ISCSIPersistentVolumeSource iscsi = 7; // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md @@ -2352,6 +2479,10 @@ message PersistentVolumeSource { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; + + // CSI represents storage that handled by an external CSI driver + // +optional + optional CSIPersistentVolumeSource csi = 22; } // PersistentVolumeSpec is the specification of a persistent volume. @@ -2393,6 +2524,12 @@ message PersistentVolumeSpec { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional repeated string mountOptions = 7; + + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + optional string volumeMode = 8; } // PersistentVolumeStatus is the current status of a persistent volume. @@ -2489,10 +2626,7 @@ message PodAffinityTerm { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional + // Empty topologyKey is not allowed. optional string topologyKey = 3; } @@ -2583,6 +2717,38 @@ message PodCondition { optional string message = 6; } +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +message PodDNSConfig { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + repeated string nameservers = 1; + + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + repeated string searches = 2; + + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + repeated PodDNSConfigOption options = 3; +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +message PodDNSConfigOption { + // Required. + optional string name = 1; + + // +optional + optional string value = 2; +} + // PodExecOptions is the query options to a Pod's remote exec call. 
// --- // TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging @@ -2732,11 +2898,11 @@ message PodSecurityContext { // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: - // + // // 1. The owning GID will be the FSGroup // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) // 3. The permission bits are OR'd with rw-rw---- - // + // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; @@ -2807,10 +2973,13 @@ message PodSpec { // +optional optional int64 activeDeadlineSeconds = 5; - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional optional string dnsPolicy = 6; @@ -2919,6 +3088,13 @@ message PodSpec { // The higher the value, the higher the priority. // +optional optional int32 priority = 25; + + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + optional PodDNSConfig dnsConfig = 26; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3609,7 +3785,7 @@ message Secret { // SecretEnvSource selects a Secret to populate the environment // variables with. -// +// // The contents of the target Secret's Data field will represent the // key-value pairs as environment variables. message SecretEnvSource { @@ -3647,7 +3823,7 @@ message SecretList { } // Adapts a secret into a projected volume. -// +// // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. // Note that this is identical to a secret volume source without the default @@ -3683,7 +3859,7 @@ message SecretReference { } // Adapts a Secret into a volume. -// +// // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. @@ -3978,7 +4154,8 @@ message ServiceSpec { // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. 
// +optional optional string externalName = 10; @@ -4183,6 +4360,15 @@ message Volume { optional VolumeSource volumeSource = 2; } +// volumeDevice describes a mapping of a raw block device within a container. +message VolumeDevice { + // name must match the name of a persistentVolumeClaim in the pod + optional string name = 1; + + // devicePath is the path inside of the container that the device will be mapped to. + optional string devicePath = 2; +} + // VolumeMount describes a mounting of a Volume within a container. message VolumeMount { // This must match the Name of a Volume. @@ -4385,3 +4571,4 @@ message WeightedPodAffinityTerm { // Required. A pod affinity term, associated with the corresponding weight. optional PodAffinityTerm podAffinityTerm = 2; } + diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 4916ee3c..62bf076e 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -43,7 +43,7 @@ var ( AddToScheme = SchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Pod{}, diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index 3c369858..6ab69116 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -402,7 +402,7 @@ type PersistentVolumeSource struct { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` // Cinder represents a cinder volume attached and mounted on kubelets host machine // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional @@ -448,6 +448,9 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` + // CSI represents storage that handled by an external CSI driver + // +optional + CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } const ( @@ -524,6 +527,11 @@ type PersistentVolumeSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options // +optional MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. @@ -541,6 +549,16 @@ const ( PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. 
+type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + // PersistentVolumeStatus is the current status of a persistent volume. type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim. @@ -628,6 +646,11 @@ type PersistentVolumeClaimSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 // +optional StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -989,7 +1012,7 @@ type StorageMedium string const ( StorageMediumDefault StorageMedium = "" // use whatever the default is for the node StorageMediumMemory StorageMedium = "Memory" // use memory (tmpfs) - StorageMediumHugepages StorageMedium = "HugePages" // use hugepages + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages ) // Protocol defines network protocols supported for things like container ports. @@ -1213,14 +1236,15 @@ type NFSVolumeSource struct { // ISCSI volumes can only be mounted as read/write once. // ISCSI volumes support ownership management and SELinux relabeling. type ISCSIVolumeSource struct { - // iSCSI target portal. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` // Target iSCSI Qualified Name. IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI target lun number. + // iSCSI Target Lun number. Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). // +optional ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` // Filesystem type of the volume that you want to mount. @@ -1234,7 +1258,7 @@ type ISCSIVolumeSource struct { // Defaults to false. // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI target portal List. The portal is either an IP or ip_addr:port if the port + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port // is other than default (typically TCP ports 860 and 3260). 
// +optional Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` @@ -1244,10 +1268,56 @@ type ISCSIVolumeSource struct { // whether support iSCSI Session CHAP authentication // +optional SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP secret for iSCSI target and initiator authentication + // CHAP Secret for iSCSI target and initiator authentication // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI initiator name. + // Custom iSCSI Initiator Name. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI Target Lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` + // whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` + // whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` + // CHAP Secret for iSCSI target and initiator authentication + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` + // Custom iSCSI Initiator Name. // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface // : will be created for the connection. 
// +optional @@ -1621,6 +1691,23 @@ type LocalVolumeSource struct { Path string `json:"path" protobuf:"bytes,1,opt,name=path"` } +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + // ContainerPort represents a network port in a single container. type ContainerPort struct { // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each @@ -1689,6 +1776,14 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// volumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"` +} + // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Name of the environment variable. Must be a C_IDENTIFIER. @@ -2032,6 +2127,12 @@ type Container struct { // +patchMergeKey=mountPath // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` // Periodic probe of container liveness. // Container will be restarted if the probe fails. // Cannot be updated. @@ -2330,6 +2431,11 @@ const ( // determined by kubelet) DNS settings. DNSDefault DNSPolicy = "Default" + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" + DefaultTerminationGracePeriodSeconds = 30 ) @@ -2486,11 +2592,8 @@ type PodAffinityTerm struct { // the labelSelector in the specified namespaces, where co-located is defined as running on a node // whose value of the label with key topologyKey matches that of any node on which any of the // selected pods is running. - // For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as "all topologies" - // ("all topologies" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); - // for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed. - // +optional - TopologyKey string `json:"topologyKey,omitempty" protobuf:"bytes,3,opt,name=topologyKey"` + // Empty topologyKey is not allowed. 
+ TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"` } // Node affinity is a group of node affinity scheduling rules. @@ -2662,10 +2765,13 @@ type PodSpec struct { // Value must be a positive integer. // +optional ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for containers within the pod. - // One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. + // Set DNS policy for the pod. // Defaults to "ClusterFirst". - // To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. // +optional DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` // NodeSelector is a selector which must be true for the pod to fit on a node. @@ -2758,6 +2864,12 @@ type PodSpec struct { // The higher the value, the higher the priority. // +optional Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it. + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -2825,6 +2937,35 @@ const ( PodQOSBestEffort PodQOSClass = "BestEffort" ) +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. 
type PodStatus struct { @@ -3288,7 +3429,8 @@ type ServiceSpec struct { // externalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. // +optional ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` @@ -3841,8 +3983,6 @@ const ( ) const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" // Default namespace prefix. ResourceDefaultNamespacePrefix = "kubernetes.io/" // Name prefix for huge page resources (alpha). @@ -4034,6 +4174,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `protobuf:"bytes,4,opt,name=propagationPolicy,casttype=DeletionPropagation"` } @@ -4326,7 +4470,6 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Event is a report of an event somewhere in the cluster. -// TODO: Decide whether to store these separately or with the object they apply to. type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -4366,8 +4509,51 @@ type Event struct { // Type of this event (Normal, Warning), new types could be added in the future // +optional Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"` + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"` + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"` } +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. 
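The Event fields added above (`eventTime`, `series`, `action`, `related`, `reportingComponent`, `reportingInstance`) work together with the `EventSeries` type defined in the next hunk. A sketch under the assumption that a caller simply wants to populate the new fields in memory (the reason, controller name, and counts are placeholders, not values produced by this patch):

```
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	now := metav1.NowMicro()
	ev := v1.Event{
		Reason:              "Scheduled",
		Action:              "Binding",                     // what was taken/failed on the regarding object
		ReportingController: "example.io/sample-scheduler", // placeholder controller name
		ReportingInstance:   "sample-scheduler-0",          // placeholder instance ID
		EventTime:           now,
		Series: &v1.EventSeries{ // nil for singleton events
			Count:            3,
			LastObservedTime: now,
			State:            v1.EventSeriesStateOngoing,
		},
	}
	fmt.Println(ev.Action, ev.Series.Count, ev.Series.State)
}
```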
+type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"` + // Time of the last occurence observed + LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` + // State of this Series: Ongoing or Finished + State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // EventList is a list of events. @@ -4495,6 +4681,13 @@ const ( ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" ) +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" +) + // A ResourceQuotaScope defines a filter that must match each object tracked by a quota type ResourceQuotaScope string @@ -4923,7 +5116,7 @@ const ( // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 + DefaultHardPodAffinitySymmetricWeight int32 = 1 ) // Sysctl defines a kernel parameter to be set diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index d8176d2f..0f141b41 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -116,6 +116,17 @@ func (Binding) SwaggerDoc() map[string]string { return map_Binding } +var map_CSIPersistentVolumeSource = map[string]string{ + "": "Represents storage that is managed by an external CSI volume driver", + "driver": "Driver is the name of the driver to use for this volume. Required.", + "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", +} + +func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CSIPersistentVolumeSource +} + var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -278,6 +289,7 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", @@ -392,7 +404,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "PropagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -529,16 +541,22 @@ func (EnvVarSource) SwaggerDoc() map[string]string { } var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "involvedObject": "The object that this event is about.", - "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", - "message": "A human-readable description of the status of this operation.", - "source": "The component reporting this event. Should be a short machine understandable string.", - "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", - "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", - "count": "The number of times this event has occurred.", - "type": "Type of this event (Normal, Warning), new types could be added in the future", + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", + "eventTime": "Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "action": "What action was taken/failed regarding to the Regarding object.", + "related": "Optional secondary object for more complex actions.", + "reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", } func (Event) SwaggerDoc() map[string]string { @@ -555,6 +573,17 @@ func (EventList) SwaggerDoc() map[string]string { return map_EventList } +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time of the last occurence observed", + "state": "State of this Series: Ongoing or Finished", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + var map_EventSource = map[string]string{ "": "EventSource contains information for an event.", "component": "Component from which the event is generated.", @@ -698,19 +727,38 @@ func (HostPathVolumeSource) SwaggerDoc() map[string]string { return map_HostPathVolumeSource } -var map_ISCSIVolumeSource = map[string]string{ - "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", - "targetPortal": "iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", +var map_ISCSIPersistentVolumeSource = map[string]string{ + "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "iqn": "Target iSCSI Qualified Name.", - "lun": "iSCSI target lun number.", - "iscsiInterface": "Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", - "portals": "iSCSI target portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "portals": "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", "chapAuthSession": "whether support iSCSI Session CHAP authentication", - "secretRef": "CHAP secret for iSCSI target and initiator authentication", - "initiatorName": "Custom iSCSI initiator name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", +} + +func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIPersistentVolumeSource +} + +var map_ISCSIVolumeSource = map[string]string{ + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", } func (ISCSIVolumeSource) SwaggerDoc() map[string]string { @@ -1151,6 +1199,7 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. 
This is an alpha feature and may change in the future.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1212,6 +1261,7 @@ var map_PersistentVolumeSource = map[string]string{ "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "csi": "CSI represents storage that handled by an external CSI driver", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1226,6 +1276,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", } func (PersistentVolumeSpec) SwaggerDoc() map[string]string { @@ -1278,7 +1329,7 @@ var map_PodAffinityTerm = map[string]string{ "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", "labelSelector": "A label query over a set of resources, in this case pods.", "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. For PreferredDuringScheduling pod anti-affinity, empty topologyKey is interpreted as \"all topologies\" (\"all topologies\" here means all the topologyKeys indicated by scheduler command-line argument --failure-domains); for affinity and for RequiredDuringScheduling pod anti-affinity, empty topologyKey is not allowed.", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. 
Empty topologyKey is not allowed.", } func (PodAffinityTerm) SwaggerDoc() map[string]string { @@ -1322,6 +1373,26 @@ func (PodCondition) SwaggerDoc() map[string]string { return map_PodCondition } +var map_PodDNSConfig = map[string]string{ + "": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + "searches": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + "options": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", +} + +func (PodDNSConfig) SwaggerDoc() map[string]string { + return map_PodDNSConfig +} + +var map_PodDNSConfigOption = map[string]string{ + "": "PodDNSConfigOption defines DNS resolver options of a pod.", + "name": "Required.", +} + +func (PodDNSConfigOption) SwaggerDoc() map[string]string { + return map_PodDNSConfigOption +} + var map_PodExecOptions = map[string]string{ "": "PodExecOptions is the query options to a Pod's remote exec call.", "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", @@ -1410,7 +1481,7 @@ var map_PodSpec = map[string]string{ "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - "dnsPolicy": "Set DNS policy for containers within the pod. One of 'ClusterFirstWithHostNet', 'ClusterFirst' or 'Default'. Defaults to \"ClusterFirst\". To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'. Note that 'None' policy is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", "serviceAccount": "DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", @@ -1429,6 +1500,7 @@ var map_PodSpec = map[string]string{ "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", "priorityClassName": "If specified, indicates the pod's priority. \"SYSTEM\" is a special keyword which indicates the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy. This is an alpha feature introduced in v1.9 and CustomPodDNS feature gate must be enabled to use it.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1956,7 +2028,7 @@ var map_ServiceSpec = map[string]string{ "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires Type to be ExternalName.", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. 
\"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery. This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints when that annotation is deprecated and all clients have been converted to use this field.", @@ -2065,6 +2137,16 @@ func (Volume) SwaggerDoc() map[string]string { return map_Volume } +var map_VolumeDevice = map[string]string{ + "": "volumeDevice describes a mapping of a raw block device within a container.", + "name": "name must match the name of a persistentVolumeClaim in the pod", + "devicePath": "devicePath is the path inside of the container that the device will be mapped to.", +} + +func (VolumeDevice) SwaggerDoc() map[string]string { + return map_VolumeDevice +} + var map_VolumeMount = map[string]string{ "": "VolumeMount describes a mounting of a Volume within a container.", "name": "This must match the Name of a Volume.", diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 8263ba0c..951b2e26 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -23,745 +23,10 @@ package v1 import ( resource "k8s.io/apimachinery/pkg/api/resource" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: 
reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil 
- }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: 
reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDPersistentVolumeSource).DeepCopyInto(out.(*RBDPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOPersistentVolumeSource).DeepCopyInto(out.(*ScaleIOPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, 
InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c 
*conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { *out = *in @@ -981,6 +246,22 @@ func (in *Binding) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -1425,6 +706,11 @@ func (in *Container) DeepCopyInto(out *Container) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe if *in == nil { @@ -2097,6 +1383,25 @@ func (in *Event) DeepCopyInto(out *Event) { out.Source = in.Source in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } return } @@ -2153,6 +1458,23 @@ func (in *EventList) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventSource) DeepCopyInto(out *EventSource) { *out = *in @@ -2448,6 +1770,45 @@ func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. 
+func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { *out = *in @@ -3590,6 +2951,15 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec **out = **in } } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -3750,7 +3120,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(ISCSIVolumeSource) + *out = new(ISCSIPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3880,6 +3250,15 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { (*in).DeepCopyInto(*out) } } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + **out = **in + } + } return } @@ -3923,6 +3302,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -4131,6 +3519,64 @@ func (in *PodCondition) DeepCopy() *PodCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { *out = *in @@ -4501,6 +3947,15 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { **out = **in } } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } return } @@ -6032,6 +5487,22 @@ func (in *Volume) DeepCopy() *Volume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go similarity index 75% rename from vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go rename to vendor/k8s.io/api/events/v1beta1/doc.go index cd40e74b..8b1a3e31 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -14,6 +14,8 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package unstructured provides conversion from runtime objects -// to map[string]interface{} representation. -package unstructured // import "k8s.io/apimachinery/pkg/conversion/unstructured" +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true + +// +groupName=events.k8s.io +package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go new file mode 100644 index 00000000..4861697c --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -0,0 +1,1306 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto + + It has these top-level messages: + Event + EventList + EventSeries +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import k8s_io_api_core_v1 "k8s.io/api/core/v1" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") +} +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n2, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if m.Series != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n3, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) + n4, err := m.Regarding.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if m.Related != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n5, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i += copy(dAtA[i:], m.Note) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) + n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) + n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } 
+ i += n7 + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) + n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x78 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + return i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Event) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Regarding.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Note) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedFirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedLastTimestamp.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DeprecatedCount)) + return n +} + +func (m *EventList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSeries) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, + `Note:` + fmt.Sprintf("%v", this.Note) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if 
rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regarding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Regarding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &k8s_io_api_core_v1.ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Note", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx 
+ intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Note = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedFirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedFirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedLastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedLastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCount", wireType) + } + m.DeprecatedCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
(int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 814 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, 
+ 0x10, 0x16, 0x13, 0x4b, 0xb6, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x3a, 0x40, + 0x60, 0x14, 0x08, 0x59, 0xa7, 0x45, 0xdb, 0x6b, 0x18, 0xbb, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0xe8, 0xad, 0xa5, 0x5d, 0x62, 0x77, 0x29, 0xc0, 0xb7, 0x5e, 0x0a, + 0xf4, 0xd8, 0x67, 0xe8, 0x13, 0xf4, 0x31, 0x7c, 0xcc, 0x31, 0x27, 0xa1, 0x66, 0xdf, 0xa2, 0xa7, + 0x82, 0xcb, 0x95, 0x28, 0x8b, 0x16, 0xec, 0x22, 0x37, 0x72, 0xe6, 0xfb, 0x99, 0x19, 0x0e, 0x07, + 0x45, 0xe7, 0xdf, 0xe9, 0x80, 0xcb, 0xf0, 0x3c, 0x1b, 0x80, 0x12, 0x60, 0x40, 0x87, 0x13, 0x10, + 0x43, 0xa9, 0x42, 0x97, 0x60, 0x29, 0x0f, 0x61, 0x02, 0xc2, 0xe8, 0x70, 0xb2, 0x3f, 0x00, 0xc3, + 0xf6, 0xc3, 0x04, 0x04, 0x28, 0x66, 0x60, 0x18, 0xa4, 0x4a, 0x1a, 0x89, 0x9f, 0x94, 0xd0, 0x80, + 0xa5, 0x3c, 0x28, 0xa1, 0x81, 0x83, 0xee, 0x3c, 0x4f, 0xb8, 0x39, 0xcb, 0x06, 0x41, 0x2c, 0xc7, + 0x61, 0x22, 0x13, 0x19, 0x5a, 0xc6, 0x20, 0x7b, 0x6f, 0xdf, 0xec, 0x8b, 0x7d, 0x2a, 0x95, 0x76, + 0x76, 0x17, 0x4c, 0x63, 0xa9, 0x20, 0x9c, 0xd4, 0xdc, 0x76, 0xbe, 0xae, 0x30, 0x63, 0x16, 0x9f, + 0x71, 0x01, 0xea, 0x22, 0x4c, 0xcf, 0x93, 0x22, 0xa0, 0xc3, 0x31, 0x18, 0x76, 0x13, 0x2b, 0x5c, + 0xc5, 0x52, 0x99, 0x30, 0x7c, 0x0c, 0x35, 0xc2, 0x37, 0xb7, 0x11, 0x74, 0x7c, 0x06, 0x63, 0x56, + 0xe3, 0x7d, 0xb5, 0x8a, 0x97, 0x19, 0x3e, 0x0a, 0xb9, 0x30, 0xda, 0xa8, 0x65, 0xd2, 0xee, 0x9f, + 0x6d, 0xd4, 0x3c, 0x2c, 0x26, 0x87, 0xdf, 0xa1, 0x8d, 0xa2, 0x85, 0x21, 0x33, 0x8c, 0x78, 0x7d, + 0x6f, 0xaf, 0xf3, 0xe2, 0xcb, 0xa0, 0x1a, 0xef, 0x5c, 0x31, 0x48, 0xcf, 0x93, 0x22, 0xa0, 0x83, + 0x02, 0x1d, 0x4c, 0xf6, 0x83, 0xb7, 0x83, 0x5f, 0x20, 0x36, 0xc7, 0x60, 0x58, 0x84, 0x2f, 0xa7, + 0xbd, 0x46, 0x3e, 0xed, 0xa1, 0x2a, 0x46, 0xe7, 0xaa, 0xf8, 0x1d, 0x6a, 0xdb, 0x8f, 0x74, 0xca, + 0xc7, 0x40, 0xee, 0x59, 0x8b, 0xf0, 0x6e, 0x16, 0xc7, 0x3c, 0x56, 0xb2, 0xa0, 0x45, 0x5b, 0xce, + 0xa1, 0x7d, 0x38, 0x53, 0xa2, 0x95, 0x28, 0x7e, 0x83, 0x5a, 0x1a, 0x14, 0x07, 0x4d, 0xee, 0x5b, + 0xf9, 0x67, 0xc1, 0xca, 0x05, 0x09, 0xac, 0xc0, 0x89, 0x45, 0x47, 0x28, 0x9f, 0xf6, 0x5a, 0xe5, + 0x33, 0x75, 0x0a, 0xf8, 0x18, 0x3d, 0x56, 0x90, 0x4a, 0x65, 0xb8, 0x48, 0x5e, 0x49, 0x61, 0x94, + 0x1c, 0x8d, 0x40, 0x91, 0xb5, 0xbe, 0xb7, 0xd7, 0x8e, 0x3e, 0x73, 0x65, 0x3c, 0xa6, 0x75, 0x08, + 0xbd, 0x89, 0x87, 0x7f, 0x40, 0x5b, 0xf3, 0xf0, 0x6b, 0xa1, 0x0d, 0x13, 0x31, 0x90, 0xa6, 0x15, + 0x7b, 0xe2, 0xc4, 0xb6, 0xe8, 0x32, 0x80, 0xd6, 0x39, 0xf8, 0x19, 0x6a, 0xb1, 0xd8, 0x70, 0x29, + 0x48, 0xcb, 0xb2, 0x1f, 0x39, 0x76, 0xeb, 0xa5, 0x8d, 0x52, 0x97, 0x2d, 0x70, 0x0a, 0x98, 0x96, + 0x82, 0xac, 0x5f, 0xc7, 0x51, 0x1b, 0xa5, 0x2e, 0x8b, 0x4f, 0x51, 0x5b, 0x41, 0xc2, 0xd4, 0x90, + 0x8b, 0x84, 0x6c, 0xd8, 0xb1, 0x3d, 0x5d, 0x1c, 0x5b, 0xf1, 0x37, 0x54, 0x9f, 0x99, 0xc2, 0x7b, + 0x50, 0x20, 0xe2, 0x85, 0x2f, 0x41, 0x67, 0x6c, 0x5a, 0x09, 0xe1, 0x37, 0x68, 0x5d, 0xc1, 0xa8, + 0x58, 0x34, 0xd2, 0xbe, 0xbb, 0x66, 0x27, 0x9f, 0xf6, 0xd6, 0x69, 0xc9, 0xa3, 0x33, 0x01, 0xdc, + 0x47, 0x6b, 0x42, 0x1a, 0x20, 0xc8, 0xf6, 0xf1, 0xc0, 0xf9, 0xae, 0xfd, 0x28, 0x0d, 0x50, 0x9b, + 0x29, 0x10, 0xe6, 0x22, 0x05, 0xd2, 0xb9, 0x8e, 0x38, 0xbd, 0x48, 0x81, 0xda, 0x0c, 0x06, 0xd4, + 0x1d, 0x42, 0xaa, 0x20, 0x2e, 0x14, 0x4f, 0x64, 0xa6, 0x62, 0x20, 0x0f, 0x6c, 0x61, 0xbd, 0x9b, + 0x0a, 0x2b, 0x97, 0xc3, 0xc2, 0x22, 0xe2, 0xe4, 0xba, 0x07, 0x4b, 0x02, 0xb4, 0x26, 0x89, 0x7f, + 0xf7, 0x10, 0xa9, 0x82, 0xdf, 0x73, 0xa5, 0xed, 0x62, 0x6a, 0xc3, 0xc6, 0x29, 0x79, 0x68, 0xfd, + 0xbe, 0xb8, 0xdb, 0xca, 0xdb, 0x6d, 0xef, 0x3b, 0x6b, 0x72, 0xb0, 0x42, 0x93, 0xae, 0x74, 0xc3, + 0xbf, 0x79, 0x68, 0xbb, 
0x4a, 0x1e, 0xb1, 0xc5, 0x4a, 0x1e, 0xfd, 0xef, 0x4a, 0x7a, 0xae, 0x92, + 0xed, 0x83, 0x9b, 0x25, 0xe9, 0x2a, 0x2f, 0xfc, 0x12, 0x6d, 0x56, 0xa9, 0x57, 0x32, 0x13, 0x86, + 0x6c, 0xf6, 0xbd, 0xbd, 0x66, 0xb4, 0xed, 0x24, 0x37, 0x0f, 0xae, 0xa7, 0xe9, 0x32, 0x7e, 0xf7, + 0x2f, 0x0f, 0x95, 0xff, 0xfb, 0x11, 0xd7, 0x06, 0xff, 0x5c, 0x3b, 0x54, 0xc1, 0xdd, 0x1a, 0x29, + 0xd8, 0xf6, 0x4c, 0x75, 0x9d, 0xf3, 0xc6, 0x2c, 0xb2, 0x70, 0xa4, 0x0e, 0x51, 0x93, 0x1b, 0x18, + 0x6b, 0x72, 0xaf, 0x7f, 0x7f, 0xaf, 0xf3, 0xa2, 0x7f, 0xdb, 0x05, 0x89, 0x1e, 0x3a, 0xb1, 0xe6, + 0xeb, 0x82, 0x46, 0x4b, 0xf6, 0x6e, 0xee, 0xa1, 0xce, 0xc2, 0x85, 0xc1, 0x4f, 0x51, 0x33, 0xb6, + 0xbd, 0x7b, 0xb6, 0xf7, 0x39, 0xa9, 0xec, 0xb8, 0xcc, 0xe1, 0x0c, 0x75, 0x47, 0x4c, 0x9b, 0xb7, + 0x03, 0x0d, 0x6a, 0x02, 0xc3, 0x4f, 0xb9, 0x93, 0xf3, 0xa5, 0x3d, 0x5a, 0x12, 0xa4, 0x35, 0x0b, + 0xfc, 0x2d, 0x6a, 0x6a, 0xc3, 0x0c, 0xd8, 0xa3, 0xd9, 0x8e, 0x3e, 0x9f, 0xd5, 0x76, 0x52, 0x04, + 0xff, 0x9d, 0xf6, 0xba, 0x0b, 0x8d, 0xd8, 0x18, 0x2d, 0xf1, 0xd1, 0xf3, 0xcb, 0x2b, 0xbf, 0xf1, + 0xe1, 0xca, 0x6f, 0x7c, 0xbc, 0xf2, 0x1b, 0xbf, 0xe6, 0xbe, 0x77, 0x99, 0xfb, 0xde, 0x87, 0xdc, + 0xf7, 0x3e, 0xe6, 0xbe, 0xf7, 0x77, 0xee, 0x7b, 0x7f, 0xfc, 0xe3, 0x37, 0x7e, 0x5a, 0x77, 0xf3, + 0xfa, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x69, 0xa9, 0x7b, 0x6e, 0xf2, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto new file mode 100644 index 00000000..c13a12a5 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.events.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +message Event { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Required. Time when this Event was first observed. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 3; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingController = 4; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 5; + + // What action was taken/failed regarding to the regarding object. 
+ // +optional + optional string action = 6; + + // Why the action was taken. + optional string reason = 7; + + // The object this Event is about. In most cases it's an Object reporting controller implements. + // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + optional k8s.io.api.core.v1.ObjectReference regarding = 8; + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + optional k8s.io.api.core.v1.ObjectReference related = 9; + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + optional string note = 10; + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + optional string type = 11; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + optional int32 deprecatedCount = 15; +} + +// EventList is a list of Event objects. +message EventList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Event items = 2; +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +message EventSeries { + // Number of occurrences in this series up to the last heartbeat time + optional int32 count = 1; + + // Time when last Event from the series was seen before last heartbeat. + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; + + // Information whether this series is ongoing or finished. + optional string state = 3; +} + diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go new file mode 100644 index 00000000..45067829 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "events.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Event{}, + &EventList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go new file mode 100644 index 00000000..1b68bd74 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -0,0 +1,122 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. +type Event struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Required. Time when this Event was first observed. + EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` + + // What action was taken/failed regarding to the regarding object. + // +optional + Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` + + // Why the action was taken. + Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` + + // The object this Event is about. 
In most cases it's an Object reporting controller implements. + // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because + // it acts on some changes in a ReplicaSet object. + // +optional + Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"` + + // Optional secondary object for more complex actions. E.g. when regarding object triggers + // a creation or deletion of related object. + // +optional + Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"` + + // Optional. A human-readable description of the status of this operation. + // Maximal length of the note is 1kB, but libraries should be prepared to + // handle values up to 64kB. + // +optional + Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"` + + // Type of this event (Normal, Warning), new types could be added in the + // future. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"` +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count" protobuf:"varint,1,opt,name=count"` + // Time when last Event from the series was seen before last heartbeat. + LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` + // Information whether this series is ongoing or finished. + State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of Event objects. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. 
+ Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 00000000..04a4a912 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "eventTime": "Required. Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "reportingController": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "action": "What action was taken/failed regarding to the regarding object.", + "reason": "Why the action was taken.", + "regarding": "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + "related": "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + "note": "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "Type of this event (Normal, Warning), new types could be added in the future.", + "deprecatedSource": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedLastTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedCount": "Deprecated field assuring backward compatibility with core.v1 Event type", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of Event objects.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", + "state": "Information whether this series is ongoing or finished.", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000..2886eba1 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,127 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + out.Regarding = in.Regarding + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(v1.ObjectReference) + **out = **in + } + } + out.DeprecatedSource = in.DeprecatedSource + in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp) + in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go index ac174dd2..8ce18304 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go index d508216a..ba5998e2 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -25,13 +25,14 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto It has these top-level messages: - APIVersion + AllowedFlexVolume AllowedHostPath CustomMetricCurrentStatus CustomMetricCurrentStatusList CustomMetricTarget CustomMetricTargetList DaemonSet + DaemonSetCondition DaemonSetList DaemonSetSpec DaemonSetStatus @@ -82,10 +83,6 @@ limitations under the License. ScaleSpec ScaleStatus SupplementalGroupsStrategyOptions - ThirdPartyResource - ThirdPartyResourceData - ThirdPartyResourceDataList - ThirdPartyResourceList */ package v1beta1 @@ -117,9 +114,9 @@ var _ = math.Inf // proto package needs to be updated. 
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *APIVersion) Reset() { *m = APIVersion{} } -func (*APIVersion) ProtoMessage() {} -func (*APIVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } func (*AllowedHostPath) ProtoMessage() {} @@ -149,246 +146,233 @@ func (m *DaemonSet) Reset() { *m = DaemonSet{} } func (*DaemonSet) ProtoMessage() {} func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} + return fileDescriptorGenerated, []int{11} } func (m *Deployment) Reset() { *m = Deployment{} } func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } func (m *DeploymentList) Reset() { *m = DeploymentList{} } func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{15} } +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } func (m *HostPortRange) Reset() { *m = HostPortRange{} } func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } func (m *IDRange) Reset() { *m = IDRange{} } func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } func (m *IPBlock) Reset() { *m = IPBlock{} } func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } func (m *Ingress) Reset() { *m = Ingress{} } func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } func (m *IngressBackend) Reset() { *m = IngressBackend{} } func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *IngressList) Reset() { *m = IngressList{} } func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *IngressRule) Reset() { *m = IngressRule{} } func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{27} } +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *IngressSpec) Reset() { *m = IngressSpec{} } func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *IngressStatus) Reset() { *m = IngressStatus{} } func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *IngressTLS) Reset() { *m = IngressTLS{} } func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{33} + return fileDescriptorGenerated, []int{34} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} + return fileDescriptorGenerated, []int{35} } func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{39} } +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } func (*ReplicationControllerDummy) ProtoMessage() {} func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{47} + return fileDescriptorGenerated, []int{48} } func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{50} + return fileDescriptorGenerated, []int{51} } func (m 
*RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} + return fileDescriptorGenerated, []int{52} } func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{52} } +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } func (m *Scale) Reset() { *m = Scale{} } func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{56} + return fileDescriptorGenerated, []int{57} } -func (m *ThirdPartyResource) Reset() { *m = ThirdPartyResource{} } -func (*ThirdPartyResource) ProtoMessage() {} -func (*ThirdPartyResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - -func (m *ThirdPartyResourceData) Reset() { *m = ThirdPartyResourceData{} } -func (*ThirdPartyResourceData) ProtoMessage() {} -func (*ThirdPartyResourceData) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } - -func (m *ThirdPartyResourceDataList) Reset() { *m = ThirdPartyResourceDataList{} } -func (*ThirdPartyResourceDataList) ProtoMessage() {} -func (*ThirdPartyResourceDataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{59} -} - -func (m *ThirdPartyResourceList) Reset() { *m = ThirdPartyResourceList{} } -func (*ThirdPartyResourceList) ProtoMessage() {} -func (*ThirdPartyResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } - func init() { - proto.RegisterType((*APIVersion)(nil), "k8s.io.api.extensions.v1beta1.APIVersion") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), 
"k8s.io.api.extensions.v1beta1.DaemonSetCondition") proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") @@ -439,12 +423,8 @@ func init() { proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") - proto.RegisterType((*ThirdPartyResource)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResource") - proto.RegisterType((*ThirdPartyResourceData)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceData") - proto.RegisterType((*ThirdPartyResourceDataList)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceDataList") - proto.RegisterType((*ThirdPartyResourceList)(nil), "k8s.io.api.extensions.v1beta1.ThirdPartyResourceList") } -func (m *APIVersion) Marshal() (dAtA []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -454,15 +434,15 @@ func (m *APIVersion) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *APIVersion) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l dAtA[i] = 0xa i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) return i, nil } @@ -650,6 +630,48 @@ func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -668,11 +690,11 @@ func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + n7, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -707,28 +729,28 @@ func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) + n8, err := 
m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 } dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n9, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + n9, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n9 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -787,6 +809,18 @@ func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -813,11 +847,11 @@ func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n10, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 } return i, nil } @@ -840,27 +874,27 @@ func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n12 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n13, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 return i, nil } @@ -898,19 +932,19 @@ func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n14, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n15, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n15 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 return i, nil } @@ -932,11 +966,11 @@ func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n16, err := m.ListMeta.MarshalTo(dAtA[i:]) + n17, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n16 + i += n17 if len(m.Items) > 0 { for _, msg := range 
m.Items { dAtA[i] = 0x12 @@ -996,11 +1030,11 @@ func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n17, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n17 + i += n18 return i, nil } @@ -1028,28 +1062,28 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n18, err := m.Selector.MarshalTo(dAtA[i:]) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n18 + i += n19 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n19, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n20, err := m.Strategy.MarshalTo(dAtA[i:]) + n20, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 dAtA[i] = 0x28 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -1070,11 +1104,11 @@ func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x42 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n21, err := m.RollbackTo.MarshalTo(dAtA[i:]) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n21 + i += n22 } if m.ProgressDeadlineSeconds != nil { dAtA[i] = 0x48 @@ -1160,11 +1194,11 @@ func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n22 + i += n23 } return i, nil } @@ -1225,11 +1259,11 @@ func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n23, err := m.Backend.MarshalTo(dAtA[i:]) + n24, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n23 + i += n24 return i, nil } @@ -1366,27 +1400,27 @@ func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n24, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n25, err := m.Spec.MarshalTo(dAtA[i:]) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n25 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n26, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n26, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n26 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n27, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 return i, nil } @@ -1412,11 +1446,11 @@ func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) - n27, err := m.ServicePort.MarshalTo(dAtA[i:]) + n28, 
err := m.ServicePort.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n27 + i += n28 return i, nil } @@ -1438,11 +1472,11 @@ func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n28, err := m.ListMeta.MarshalTo(dAtA[i:]) + n29, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n28 + i += n29 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1480,11 +1514,11 @@ func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) - n29, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n29 + i += n30 return i, nil } @@ -1507,11 +1541,11 @@ func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) - n30, err := m.HTTP.MarshalTo(dAtA[i:]) + n31, err := m.HTTP.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n30 + i += n31 } return i, nil } @@ -1535,11 +1569,11 @@ func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) - n31, err := m.Backend.MarshalTo(dAtA[i:]) + n32, err := m.Backend.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.TLS) > 0 { for _, msg := range m.TLS { @@ -1586,11 +1620,11 @@ func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n32, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 return i, nil } @@ -1649,19 +1683,19 @@ func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) + n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n34 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n35, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 return i, nil } @@ -1767,11 +1801,11 @@ func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n35, err := m.ListMeta.MarshalTo(dAtA[i:]) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -1806,32 +1840,32 @@ func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n36, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n37, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + n37, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n37 } - if m.IPBlock != nil { - dAtA[i] = 0x1a + if m.NamespaceSelector != nil { + dAtA[i] = 0x12 i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) - n38, err := m.IPBlock.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n38 } + if m.IPBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) + n39, err := m.IPBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + } return i, nil } @@ -1860,11 +1894,11 @@ func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n39, err := m.Port.MarshalTo(dAtA[i:]) + n40, err := m.Port.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 } return i, nil } @@ -1887,11 +1921,11 @@ func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n40, err := m.PodSelector.MarshalTo(dAtA[i:]) + n41, err := m.PodSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if len(m.Ingress) > 0 { for _, msg := range m.Ingress { dAtA[i] = 0x12 @@ -1952,19 +1986,19 @@ func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n41, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n42, err := m.Spec.MarshalTo(dAtA[i:]) + n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n42 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n43, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 return i, nil } @@ -1986,11 +2020,11 @@ func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n43, err := m.ListMeta.MarshalTo(dAtA[i:]) + n44, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n43 + i += n44 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2128,35 +2162,35 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x52 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) - n44, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n45, err := m.RunAsUser.MarshalTo(dAtA[i:]) + n45, err := m.SELinux.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n45 - dAtA[i] = 0x62 + dAtA[i] = 0x5a i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n46, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) + n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n46 - dAtA[i] = 0x6a + dAtA[i] = 0x62 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n47, err := m.FSGroup.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) + n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n47 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) + n48, err := m.FSGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i 
+= n48 dAtA[i] = 0x70 i++ if m.ReadOnlyRootFilesystem { @@ -2201,6 +2235,20 @@ func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { i += n } } + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } return i, nil } @@ -2222,27 +2270,27 @@ func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n48, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n49, err := m.Spec.MarshalTo(dAtA[i:]) + n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n49 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n50, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n50, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n50 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n51, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 return i, nil } @@ -2272,11 +2320,11 @@ func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n51, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n51 + i += n52 dAtA[i] = 0x22 i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) @@ -2306,11 +2354,11 @@ func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n52, err := m.ListMeta.MarshalTo(dAtA[i:]) + n53, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n52 + i += n53 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -2350,20 +2398,20 @@ func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n53, err := m.Selector.MarshalTo(dAtA[i:]) + n54, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n53 + i += n54 } dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n54, err := m.Template.MarshalTo(dAtA[i:]) + n55, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n54 + i += n55 dAtA[i] = 0x20 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) @@ -2473,11 +2521,11 @@ func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n55, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n55 + i += n56 } return i, nil } @@ -2501,21 +2549,21 @@ func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n56 + i += n57 } if m.MaxSurge != nil { dAtA[i] = 
0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n57, err := m.MaxSurge.MarshalTo(dAtA[i:]) + n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n57 + i += n58 } return i, nil } @@ -2577,11 +2625,11 @@ func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n58, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n58 + i += n59 } return i, nil } @@ -2604,27 +2652,27 @@ func (m *Scale) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n59, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n60, err := m.Spec.MarshalTo(dAtA[i:]) + n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n60 - dAtA[i] = 0x1a + dAtA[i] = 0x12 i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n61, err := m.Status.MarshalTo(dAtA[i:]) + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n61, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n61 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n62, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 return i, nil } @@ -2730,156 +2778,6 @@ func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) return i, nil } -func (m *ThirdPartyResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n62, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceData) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceData) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n63, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - if m.Data != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) - i += copy(dAtA[i:], m.Data) - } - return i, nil -} - -func (m *ThirdPartyResourceDataList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceDataList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 
0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n64, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ThirdPartyResourceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ThirdPartyResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n65, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) dAtA[offset+1] = uint8(v >> 8) @@ -2907,10 +2805,10 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } -func (m *APIVersion) Size() (n int) { +func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l - l = len(m.Name) + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) return n } @@ -2979,6 +2877,22 @@ func (m *DaemonSet) Size() (n int) { return n } +func (m *DaemonSetCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + func (m *DaemonSetList) Size() (n int) { var l int _ = l @@ -3026,6 +2940,12 @@ func (m *DaemonSetStatus) Size() (n int) { if m.CollisionCount != nil { n += 1 + sovGenerated(uint64(*m.CollisionCount)) } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3531,6 +3451,12 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } return n } @@ -3722,62 +3648,6 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { return n } -func (m *ThirdPartyResource) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Versions) > 0 { - for _, e := range m.Versions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceData) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ThirdPartyResourceDataList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = 
e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ThirdPartyResourceList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func sovGenerated(x uint64) (n int) { for { n++ @@ -3791,12 +3661,12 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *APIVersion) String() string { +func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&APIVersion{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, `}`, }, "") return s @@ -3865,6 +3735,20 @@ func (this *DaemonSet) String() string { }, "") return s } +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} func (this *DaemonSetList) String() string { if this == nil { return "nil" @@ -3905,6 +3789,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4303,6 +4188,7 @@ func (this *PodSecurityPolicySpec) String() string { `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4489,51 +4375,6 @@ func (this *SupplementalGroupsStrategyOptions) String() string { }, "") return s } -func (this *ThirdPartyResource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResource{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "APIVersion", "APIVersion", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceData) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceData{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceDataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceDataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResourceData", "ThirdPartyResourceData", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ThirdPartyResourceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ThirdPartyResourceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ThirdPartyResource", "ThirdPartyResource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} func valueToStringGenerated(v interface{}) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -4542,7 +4383,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *APIVersion) Unmarshal(dAtA []byte) error { +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4565,15 +4406,15 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: APIVersion: wiretype end group for non-group") + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: APIVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4598,7 +4439,7 @@ func (m *APIVersion) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -5220,6 +5061,202 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } return nil } +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *DaemonSetList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -5733,6 +5770,37 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } } m.CollisionCount = &v + case 10: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10207,6 +10275,37 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -12076,479 +12175,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *ThirdPartyResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Versions = append(m.Versions, APIVersion{}) - if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceData) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceData: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceData: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceDataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceDataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResourceData{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ThirdPartyResourceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ThirdPartyResourceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ThirdPartyResourceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ThirdPartyResource{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 @@ -12659,232 +12285,229 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 3632 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5b, 0xcd, 0x6f, 0x24, 0xc7, - 0x75, 0xdf, 0x9e, 0x0f, 0xce, 0xf0, 0x71, 0xf9, 0x55, 0x5c, 0x91, 0x63, 0x4a, 0xcb, 0x59, 0xb7, - 0x80, 0xcd, 0x4a, 0x91, 0x66, 0xb4, 0x94, 0x56, 0x56, 0x24, 0xc4, 0x0e, 0x87, 0xdc, 0x0f, 0x3a, - 0xfc, 0x18, 0xd5, 0x0c, 0x69, 0x67, 0x91, 0x75, 0xd4, 0x9c, 0x29, 0x0e, 0x7b, 0xd9, 0xd3, 0xdd, - 0xee, 0xae, 0xa6, 0x39, 0x97, 0x20, 0x87, 0xc0, 0x40, 0x80, 0x04, 0x49, 0x0e, 0x0e, 0x94, 0x5b, - 0x7c, 0xc9, 0x29, 0x41, 0x7c, 0x4b, 0x0e, 0x86, 0x81, 0x00, 0x0e, 0xb0, 0x08, 0x9c, 0xc0, 0xa7, - 0xc4, 0x27, 0x22, 0xa2, 0x8e, 0xf9, 0x07, 0x82, 0x3d, 0x04, 0x41, 0x55, 0x57, 0x7f, 0x77, 0x73, - 0x66, 0xa8, 0x25, 0x11, 0xf8, 0x36, 0x5d, 0xef, 0xbd, 0xdf, 0x7b, 0xf5, 0xaa, 0xea, 0xbd, 0x57, - 0x1f, 0x03, 0x8f, 0x8e, 0x3f, 0xb2, 0x6b, 0xaa, 0x51, 0x3f, 0x76, 0x0e, 0x88, 0xa5, 0x13, 0x4a, - 0xec, 0xfa, 0x09, 0xd1, 0xbb, 0x86, 0x55, 0x17, 0x04, 0xc5, 0x54, 0xeb, 0xe4, 0x94, 0x12, 0xdd, - 0x56, 0x0d, 0xdd, 0xae, 0x9f, 0xdc, 0x3f, 0x20, 0x54, 0xb9, 0x5f, 0xef, 0x11, 0x9d, 0x58, 0x0a, - 0x25, 0xdd, 0x9a, 0x69, 0x19, 0xd4, 0x40, 0xb7, 0x5d, 0xf6, 0x9a, 0x62, 0xaa, 0xb5, 0x80, 0xbd, - 0x26, 0xd8, 0x97, 0xdf, 0xed, 0xa9, 0xf4, 0xc8, 0x39, 0xa8, 0x75, 0x8c, 0x7e, 0xbd, 0x67, 0xf4, - 0x8c, 0x3a, 0x97, 0x3a, 0x70, 0x0e, 0xf9, 0x17, 0xff, 0xe0, 0xbf, 0x5c, 0xb4, 0x65, 0x39, 0xa4, - 0xbc, 0x63, 0x58, 0xa4, 0x7e, 0x92, 0xd0, 0xb8, 0xfc, 0x56, 0x88, 0xc7, 0x34, 0x34, 0xb5, 0x33, - 0xc8, 0x32, 0x6e, 0xf9, 0x83, 0x80, 0xb5, 0xaf, 0x74, 0x8e, 0x54, 0x9d, 0x58, 0x83, 0xba, 0x79, - 0xdc, 0xe3, 0xb2, 0x16, 0xb1, 0x0d, 0xc7, 0xea, 0x90, 0xb1, 0xa4, 0xec, 0x7a, 0x9f, 0x50, 0x25, - 0xcd, 0xac, 0x7a, 0x96, 0x94, 0xe5, 0xe8, 0x54, 0xed, 0x27, 0xd5, 0x7c, 0x38, 0x4c, 0xc0, 0xee, - 0x1c, 0x91, 0xbe, 0x92, 0x90, 0x7b, 
0x3f, 0x4b, 0xce, 0xa1, 0xaa, 0x56, 0x57, 0x75, 0x6a, 0x53, - 0x2b, 0x2e, 0x24, 0xd7, 0x00, 0xd6, 0x9a, 0x9b, 0xfb, 0xc4, 0x62, 0xc3, 0x83, 0xee, 0x40, 0x41, - 0x57, 0xfa, 0xa4, 0x22, 0xdd, 0x91, 0xee, 0x4d, 0x36, 0x6e, 0xbe, 0x38, 0xab, 0xde, 0x38, 0x3f, - 0xab, 0x16, 0x76, 0x94, 0x3e, 0xc1, 0x9c, 0x22, 0x3f, 0x84, 0xd9, 0x35, 0x4d, 0x33, 0x7e, 0x40, - 0xba, 0x4f, 0x0c, 0x9b, 0x36, 0x15, 0x7a, 0x84, 0x56, 0x01, 0x4c, 0x85, 0x1e, 0x35, 0x2d, 0x72, - 0xa8, 0x9e, 0x0a, 0x51, 0x24, 0x44, 0xa1, 0xe9, 0x53, 0x70, 0x88, 0x4b, 0xfe, 0x6b, 0x09, 0xbe, - 0xb6, 0xee, 0xd8, 0xd4, 0xe8, 0x6f, 0x13, 0x6a, 0xa9, 0x9d, 0x75, 0xc7, 0xb2, 0x88, 0x4e, 0x5b, - 0x54, 0xa1, 0x8e, 0x3d, 0xdc, 0x0c, 0xf4, 0x14, 0x8a, 0x27, 0x8a, 0xe6, 0x90, 0x4a, 0xee, 0x8e, - 0x74, 0x6f, 0x6a, 0xb5, 0x56, 0x0b, 0x66, 0x9b, 0xdf, 0xf7, 0x9a, 0x79, 0xdc, 0xe3, 0xd3, 0xcf, - 0x1b, 0xd0, 0xda, 0xa7, 0x8e, 0xa2, 0x53, 0x95, 0x0e, 0x1a, 0xb7, 0x04, 0xe4, 0x4d, 0xa1, 0x77, - 0x9f, 0x61, 0x61, 0x17, 0x52, 0xfe, 0x43, 0xb8, 0x9d, 0x69, 0xda, 0x96, 0x6a, 0x53, 0xf4, 0x0c, - 0x8a, 0x2a, 0x25, 0x7d, 0xbb, 0x22, 0xdd, 0xc9, 0xdf, 0x9b, 0x5a, 0xfd, 0xa8, 0x76, 0xe1, 0x54, - 0xaf, 0x65, 0x82, 0x35, 0xa6, 0x85, 0x19, 0xc5, 0x4d, 0x06, 0x87, 0x5d, 0x54, 0xf9, 0x2f, 0x25, - 0x40, 0x61, 0x99, 0xb6, 0x62, 0xf5, 0x08, 0x1d, 0xc1, 0x29, 0xbf, 0xf7, 0xd5, 0x9c, 0xb2, 0x20, - 0x20, 0xa7, 0x5c, 0x85, 0x11, 0x9f, 0x98, 0xb0, 0x98, 0x34, 0x89, 0x3b, 0x63, 0x3f, 0xea, 0x8c, - 0xfb, 0x63, 0x38, 0xc3, 0x45, 0xc9, 0xf0, 0xc2, 0x8f, 0x72, 0x30, 0xb9, 0xa1, 0x90, 0xbe, 0xa1, - 0xb7, 0x08, 0x45, 0x9f, 0x41, 0x99, 0xad, 0xaf, 0xae, 0x42, 0x15, 0xee, 0x80, 0xa9, 0xd5, 0xf7, - 0x2e, 0xea, 0x9d, 0x5d, 0x63, 0xdc, 0xb5, 0x93, 0xfb, 0xb5, 0xdd, 0x83, 0xe7, 0xa4, 0x43, 0xb7, - 0x09, 0x55, 0x82, 0x39, 0x19, 0xb4, 0x61, 0x1f, 0x15, 0xed, 0x40, 0xc1, 0x36, 0x49, 0x47, 0xf8, - 0xee, 0x9d, 0x21, 0xdd, 0xf0, 0x2d, 0x6b, 0x99, 0xa4, 0x13, 0x0c, 0x06, 0xfb, 0xc2, 0x1c, 0x07, - 0xed, 0xc3, 0x84, 0xcd, 0x47, 0xb9, 0x92, 0x4f, 0x8c, 0xc6, 0xc5, 0x88, 0xee, 0xdc, 0x98, 0x11, - 0x98, 0x13, 0xee, 0x37, 0x16, 0x68, 0xf2, 0x4f, 0x25, 0x98, 0xf6, 0x79, 0xf9, 0x08, 0xfc, 0x7e, - 0xc2, 0x37, 0xb5, 0xd1, 0x7c, 0xc3, 0xa4, 0xb9, 0x67, 0xe6, 0x84, 0xae, 0xb2, 0xd7, 0x12, 0xf2, - 0xcb, 0xb6, 0x37, 0xbe, 0x39, 0x3e, 0xbe, 0xf7, 0x46, 0xed, 0x46, 0xc6, 0xb0, 0xfe, 0x55, 0x21, - 0x64, 0x3e, 0x73, 0x17, 0x7a, 0x06, 0x65, 0x9b, 0x68, 0xa4, 0x43, 0x0d, 0x4b, 0x98, 0xff, 0xfe, - 0x88, 0xe6, 0x2b, 0x07, 0x44, 0x6b, 0x09, 0xd1, 0xc6, 0x4d, 0x66, 0xbf, 0xf7, 0x85, 0x7d, 0x48, - 0xf4, 0x29, 0x94, 0x29, 0xe9, 0x9b, 0x9a, 0x42, 0xbd, 0x75, 0xf1, 0x66, 0xb8, 0x0b, 0x2c, 0x99, - 0x30, 0xb0, 0xa6, 0xd1, 0x6d, 0x0b, 0x36, 0x3e, 0xa4, 0xbe, 0x4b, 0xbc, 0x56, 0xec, 0xc3, 0xa0, - 0x13, 0x98, 0x71, 0xcc, 0x2e, 0xe3, 0xa4, 0x2c, 0x94, 0xf6, 0x06, 0x62, 0x88, 0x3f, 0x1c, 0xd5, - 0x37, 0x7b, 0x11, 0xe9, 0xc6, 0xa2, 0xd0, 0x35, 0x13, 0x6d, 0xc7, 0x31, 0x2d, 0x68, 0x0d, 0x66, - 0xfb, 0xaa, 0x8e, 0x89, 0xd2, 0x1d, 0xb4, 0x48, 0xc7, 0xd0, 0xbb, 0x76, 0xa5, 0x70, 0x47, 0xba, - 0x57, 0x6c, 0x2c, 0x09, 0x80, 0xd9, 0xed, 0x28, 0x19, 0xc7, 0xf9, 0xd1, 0xb7, 0x01, 0x79, 0xdd, - 0x78, 0xec, 0x66, 0x02, 0xd5, 0xd0, 0x2b, 0xc5, 0x3b, 0xd2, 0xbd, 0x7c, 0x63, 0x59, 0xa0, 0xa0, - 0x76, 0x82, 0x03, 0xa7, 0x48, 0xa1, 0x2d, 0xb8, 0x65, 0x91, 0x13, 0x95, 0xf5, 0xf1, 0x89, 0x6a, - 0x53, 0xc3, 0x1a, 0x6c, 0xa9, 0x7d, 0x95, 0x56, 0x26, 0xb8, 0x4d, 0x95, 0xf3, 0xb3, 0xea, 0x2d, - 0x9c, 0x42, 0xc7, 0xa9, 0x52, 0xf2, 0x4f, 0x8a, 0x30, 0x1b, 0x5b, 0x03, 0x68, 0x1f, 0x16, 0x3b, - 0x6e, 0xc0, 0xdc, 0x71, 0xfa, 0x07, 0xc4, 0x6a, 0x75, 0x8e, 
0x48, 0xd7, 0xd1, 0x48, 0x97, 0x4f, - 0x94, 0x62, 0x63, 0x45, 0x58, 0xbc, 0xb8, 0x9e, 0xca, 0x85, 0x33, 0xa4, 0x99, 0x17, 0x74, 0xde, - 0xb4, 0xad, 0xda, 0xb6, 0x8f, 0x99, 0xe3, 0x98, 0xbe, 0x17, 0x76, 0x12, 0x1c, 0x38, 0x45, 0x8a, - 0xd9, 0xd8, 0x25, 0xb6, 0x6a, 0x91, 0x6e, 0xdc, 0xc6, 0x7c, 0xd4, 0xc6, 0x8d, 0x54, 0x2e, 0x9c, - 0x21, 0x8d, 0x1e, 0xc0, 0x94, 0xab, 0x8d, 0x8f, 0x9f, 0x18, 0x68, 0x3f, 0x44, 0xef, 0x04, 0x24, - 0x1c, 0xe6, 0x63, 0x5d, 0x33, 0x0e, 0x6c, 0x62, 0x9d, 0x90, 0x6e, 0xf6, 0x00, 0xef, 0x26, 0x38, - 0x70, 0x8a, 0x14, 0xeb, 0x9a, 0x3b, 0x03, 0x13, 0x5d, 0x9b, 0x88, 0x76, 0x6d, 0x2f, 0x95, 0x0b, - 0x67, 0x48, 0xb3, 0x79, 0xec, 0x9a, 0xbc, 0x76, 0xa2, 0xa8, 0x9a, 0x72, 0xa0, 0x91, 0x4a, 0x29, - 0x3a, 0x8f, 0x77, 0xa2, 0x64, 0x1c, 0xe7, 0x47, 0x8f, 0x61, 0xde, 0x6d, 0xda, 0xd3, 0x15, 0x1f, - 0xa4, 0xcc, 0x41, 0xbe, 0x26, 0x40, 0xe6, 0x77, 0xe2, 0x0c, 0x38, 0x29, 0x83, 0x3e, 0x86, 0x99, - 0x8e, 0xa1, 0x69, 0x7c, 0x3e, 0xae, 0x1b, 0x8e, 0x4e, 0x2b, 0x93, 0x1c, 0x05, 0xb1, 0xf5, 0xb8, - 0x1e, 0xa1, 0xe0, 0x18, 0xa7, 0xfc, 0xaf, 0x12, 0x2c, 0x65, 0xac, 0x69, 0xf4, 0x2d, 0x28, 0xd0, - 0x81, 0xe9, 0x65, 0xeb, 0xdf, 0xf4, 0x12, 0x44, 0x7b, 0x60, 0x92, 0x97, 0x67, 0xd5, 0xd7, 0x33, - 0xc4, 0x18, 0x19, 0x73, 0x41, 0xa4, 0xc3, 0xb4, 0xc5, 0xd4, 0xe9, 0x3d, 0x97, 0x45, 0x04, 0xaf, - 0x07, 0x43, 0x62, 0x0c, 0x0e, 0xcb, 0x04, 0xc1, 0x78, 0xfe, 0xfc, 0xac, 0x3a, 0x1d, 0xa1, 0xe1, - 0x28, 0xbc, 0xfc, 0x79, 0x0e, 0x60, 0x83, 0x98, 0x9a, 0x31, 0xe8, 0x13, 0xfd, 0x3a, 0x12, 0xee, - 0x6e, 0x24, 0xe1, 0xbe, 0x3b, 0x2c, 0x76, 0xfa, 0xa6, 0x65, 0x66, 0xdc, 0xef, 0xc4, 0x32, 0x6e, - 0x7d, 0x74, 0xc8, 0x8b, 0x53, 0xee, 0x7f, 0xe6, 0x61, 0x21, 0x60, 0x5e, 0x37, 0xf4, 0xae, 0xca, - 0xd7, 0xc7, 0x27, 0x91, 0x31, 0xfe, 0x8d, 0xd8, 0x18, 0x2f, 0xa5, 0x88, 0x84, 0xc6, 0x77, 0xcb, - 0xb7, 0x36, 0xc7, 0xc5, 0x3f, 0x88, 0x2a, 0x7f, 0x79, 0x56, 0x4d, 0xd9, 0xf3, 0xd4, 0x7c, 0xa4, - 0xa8, 0x89, 0xe8, 0x2e, 0x4c, 0x58, 0x44, 0xb1, 0x0d, 0x9d, 0x07, 0x8a, 0xc9, 0xa0, 0x2b, 0x98, - 0xb7, 0x62, 0x41, 0x45, 0x6f, 0x41, 0xa9, 0x4f, 0x6c, 0x5b, 0xe9, 0x11, 0x1e, 0x13, 0x26, 0x1b, - 0xb3, 0x82, 0xb1, 0xb4, 0xed, 0x36, 0x63, 0x8f, 0x8e, 0x9e, 0xc3, 0x8c, 0xa6, 0xd8, 0x62, 0x82, - 0xb6, 0xd5, 0x3e, 0xe1, 0xab, 0x7e, 0x6a, 0xf5, 0xed, 0xd1, 0xe6, 0x01, 0x93, 0x08, 0x32, 0xdb, - 0x56, 0x04, 0x09, 0xc7, 0x90, 0xd1, 0x09, 0x20, 0xd6, 0xd2, 0xb6, 0x14, 0xdd, 0x76, 0x1d, 0xc5, - 0xf4, 0x95, 0xc6, 0xd6, 0xe7, 0x47, 0xb8, 0xad, 0x04, 0x1a, 0x4e, 0xd1, 0x20, 0xff, 0x4c, 0x82, - 0x99, 0x60, 0x98, 0xae, 0xa1, 0x9a, 0xda, 0x89, 0x56, 0x53, 0x6f, 0x8d, 0x3c, 0x45, 0x33, 0xca, - 0xa9, 0xff, 0xc9, 0x01, 0x0a, 0x98, 0xd8, 0x02, 0x3f, 0x50, 0x3a, 0xc7, 0x23, 0xec, 0x15, 0x7e, - 0x24, 0x01, 0x12, 0xe1, 0x79, 0x4d, 0xd7, 0x0d, 0xca, 0x23, 0xbe, 0x67, 0xd6, 0xe6, 0xc8, 0x66, - 0x79, 0x1a, 0x6b, 0x7b, 0x09, 0xac, 0x87, 0x3a, 0xb5, 0x06, 0xc1, 0x88, 0x24, 0x19, 0x70, 0x8a, - 0x01, 0x48, 0x01, 0xb0, 0x04, 0x66, 0xdb, 0x10, 0x0b, 0xf9, 0xdd, 0x11, 0x62, 0x1e, 0x13, 0x58, - 0x37, 0xf4, 0x43, 0xb5, 0x17, 0x84, 0x1d, 0xec, 0x03, 0xe1, 0x10, 0xe8, 0xf2, 0x43, 0x58, 0xca, - 0xb0, 0x16, 0xcd, 0x41, 0xfe, 0x98, 0x0c, 0x5c, 0xb7, 0x61, 0xf6, 0x13, 0xdd, 0x0a, 0xef, 0xa9, - 0x26, 0xc5, 0x76, 0xe8, 0xe3, 0xdc, 0x47, 0x92, 0xfc, 0xd3, 0x62, 0x78, 0xee, 0xf0, 0x52, 0xf6, - 0x1e, 0x94, 0x2d, 0x62, 0x6a, 0x6a, 0x47, 0xb1, 0x45, 0x85, 0xc2, 0xab, 0x52, 0x2c, 0xda, 0xb0, - 0x4f, 0x8d, 0x14, 0xbd, 0xb9, 0xab, 0x2d, 0x7a, 0xf3, 0xaf, 0xa6, 0xe8, 0xfd, 0x03, 0x28, 0xdb, - 0x5e, 0xb9, 0x5b, 0xe0, 0x90, 0xf7, 0xc7, 0x88, 0xaf, 0xa2, 0xd2, 0xf5, 0x15, 0xf8, 
0x35, 0xae, - 0x0f, 0x9a, 0x56, 0xdd, 0x16, 0xc7, 0xac, 0x6e, 0x5f, 0x69, 0x45, 0xca, 0x62, 0xaa, 0xa9, 0x38, - 0x36, 0xe9, 0xf2, 0x40, 0x54, 0x0e, 0x62, 0x6a, 0x93, 0xb7, 0x62, 0x41, 0x45, 0xcf, 0x22, 0x53, - 0xb6, 0x7c, 0x99, 0x29, 0x3b, 0x93, 0x3d, 0x5d, 0xd1, 0x1e, 0x2c, 0x99, 0x96, 0xd1, 0xb3, 0x88, - 0x6d, 0x6f, 0x10, 0xa5, 0xab, 0xa9, 0x3a, 0xf1, 0xfc, 0xe3, 0x96, 0x2a, 0xaf, 0x9f, 0x9f, 0x55, - 0x97, 0x9a, 0xe9, 0x2c, 0x38, 0x4b, 0x56, 0x7e, 0x51, 0x80, 0xb9, 0x78, 0x06, 0xcc, 0xa8, 0x1e, - 0xa5, 0x4b, 0x55, 0x8f, 0xef, 0x84, 0x16, 0x83, 0x5b, 0x5a, 0xfb, 0xa3, 0x9f, 0xb2, 0x20, 0xd6, - 0x60, 0x56, 0x44, 0x03, 0x8f, 0x28, 0xea, 0x67, 0x7f, 0xf4, 0xf7, 0xa2, 0x64, 0x1c, 0xe7, 0x67, - 0x35, 0x61, 0x50, 0xea, 0x79, 0x20, 0x85, 0x68, 0x4d, 0xb8, 0x16, 0x67, 0xc0, 0x49, 0x19, 0xb4, - 0x0d, 0x0b, 0x8e, 0x9e, 0x84, 0x72, 0x67, 0xe3, 0xeb, 0x02, 0x6a, 0x61, 0x2f, 0xc9, 0x82, 0xd3, - 0xe4, 0xd0, 0x21, 0x40, 0xc7, 0x4b, 0xdb, 0x76, 0x65, 0x82, 0x47, 0xd8, 0xd5, 0x91, 0xd7, 0x8e, - 0x9f, 0xf1, 0x83, 0xb8, 0xe6, 0x37, 0xd9, 0x38, 0x84, 0x8c, 0x3e, 0x81, 0x69, 0x8b, 0x6f, 0x08, - 0x3c, 0x83, 0xdd, 0xa2, 0xfa, 0x35, 0x21, 0x36, 0x8d, 0xc3, 0x44, 0x1c, 0xe5, 0x4d, 0xa9, 0x83, - 0xcb, 0x23, 0xd7, 0xc1, 0xff, 0x2c, 0x85, 0x93, 0x90, 0x5f, 0x02, 0x7f, 0x1c, 0x29, 0x8f, 0xee, - 0xc6, 0xca, 0xa3, 0xc5, 0xa4, 0x44, 0xa8, 0x3a, 0x32, 0xd2, 0xab, 0xdf, 0x0f, 0xc7, 0xaa, 0x7e, - 0x83, 0xe4, 0x39, 0xbc, 0xfc, 0xfd, 0xb1, 0x04, 0x8b, 0x8f, 0x5a, 0x8f, 0x2d, 0xc3, 0x31, 0x3d, - 0x73, 0x76, 0x4d, 0xd7, 0xaf, 0xdf, 0x80, 0x82, 0xe5, 0x68, 0x5e, 0x3f, 0xde, 0xf4, 0xfa, 0x81, - 0x1d, 0x8d, 0xf5, 0x63, 0x21, 0x26, 0xe5, 0x76, 0x82, 0x09, 0xa0, 0x1d, 0x98, 0xb0, 0x14, 0xbd, - 0x47, 0xbc, 0xb4, 0x7a, 0x77, 0x88, 0xf5, 0x9b, 0x1b, 0x98, 0xb1, 0x87, 0x8a, 0x37, 0x2e, 0x8d, - 0x05, 0x8a, 0xfc, 0x67, 0x12, 0xcc, 0x3e, 0x69, 0xb7, 0x9b, 0x9b, 0x3a, 0x5f, 0xd1, 0xfc, 0xf0, - 0xf5, 0x0e, 0x14, 0x4c, 0x85, 0x1e, 0xc5, 0x33, 0x3d, 0xa3, 0x61, 0x4e, 0x41, 0xdf, 0x85, 0x12, - 0x8b, 0x24, 0x44, 0xef, 0x8e, 0x58, 0x6a, 0x0b, 0xf8, 0x86, 0x2b, 0x14, 0x54, 0x88, 0xa2, 0x01, - 0x7b, 0x70, 0xf2, 0x31, 0xdc, 0x0a, 0x99, 0xc3, 0xfc, 0xc1, 0xcf, 0x0c, 0x51, 0x0b, 0x8a, 0x4c, - 0xb3, 0x77, 0x24, 0x38, 0xec, 0xe4, 0x2b, 0xd6, 0xa5, 0xa0, 0xd2, 0x61, 0x5f, 0x36, 0x76, 0xb1, - 0xe4, 0x6d, 0x98, 0xe6, 0x27, 0xce, 0x86, 0x45, 0xb9, 0x5b, 0xd0, 0x6d, 0xc8, 0xf7, 0x55, 0x5d, - 0xe4, 0xd9, 0x29, 0x21, 0x93, 0x67, 0x39, 0x82, 0xb5, 0x73, 0xb2, 0x72, 0x2a, 0x22, 0x4f, 0x40, - 0x56, 0x4e, 0x31, 0x6b, 0x97, 0x1f, 0x43, 0x49, 0xb8, 0x3b, 0x0c, 0x94, 0xbf, 0x18, 0x28, 0x9f, - 0x02, 0xb4, 0x0b, 0xa5, 0xcd, 0x66, 0x43, 0x33, 0xdc, 0xaa, 0xab, 0xa3, 0x76, 0xad, 0xf8, 0x58, - 0xac, 0x6f, 0x6e, 0x60, 0xcc, 0x29, 0x48, 0x86, 0x09, 0x72, 0xda, 0x21, 0x26, 0xe5, 0x33, 0x62, - 0xb2, 0x01, 0x6c, 0x94, 0x1f, 0xf2, 0x16, 0x2c, 0x28, 0xf2, 0x9f, 0xe7, 0xa0, 0x24, 0xdc, 0x71, - 0x0d, 0xbb, 0xb0, 0xad, 0xc8, 0x2e, 0xec, 0xed, 0xd1, 0xa6, 0x46, 0xe6, 0x16, 0xac, 0x1d, 0xdb, - 0x82, 0xbd, 0x33, 0x22, 0xde, 0xc5, 0xfb, 0xaf, 0x9f, 0x48, 0x30, 0x13, 0x9d, 0x94, 0xe8, 0x01, - 0x4c, 0xb1, 0x84, 0xa3, 0x76, 0xc8, 0x4e, 0x50, 0xe7, 0xfa, 0xa7, 0x23, 0xad, 0x80, 0x84, 0xc3, - 0x7c, 0xa8, 0xe7, 0x8b, 0xb1, 0x79, 0x24, 0x3a, 0x9d, 0xed, 0x52, 0x87, 0xaa, 0x5a, 0xcd, 0xbd, - 0x38, 0xa9, 0x6d, 0xea, 0x74, 0xd7, 0x6a, 0x51, 0x4b, 0xd5, 0x7b, 0x09, 0x45, 0x7c, 0x52, 0x86, - 0x91, 0xe5, 0x7f, 0x92, 0x60, 0x4a, 0x98, 0x7c, 0x0d, 0xbb, 0x8a, 0xdf, 0x8d, 0xee, 0x2a, 0xee, - 0x8e, 0xb8, 0xc0, 0xd3, 0xb7, 0x14, 0x7f, 0x1b, 0x98, 0xce, 0x96, 0x34, 0x9b, 0xd5, 0x47, 0x86, - 0x4d, 0xe3, 
0xb3, 0x9a, 0x2d, 0x46, 0xcc, 0x29, 0xc8, 0x81, 0x39, 0x35, 0x16, 0x03, 0x84, 0x6b, - 0xeb, 0xa3, 0x59, 0xe2, 0x8b, 0x35, 0x2a, 0x02, 0x7e, 0x2e, 0x4e, 0xc1, 0x09, 0x15, 0x32, 0x81, - 0x04, 0x17, 0xfa, 0x14, 0x0a, 0x47, 0x94, 0x9a, 0x29, 0x07, 0xc9, 0x43, 0x22, 0x4f, 0x60, 0x42, - 0x99, 0xf7, 0xae, 0xdd, 0x6e, 0x62, 0x0e, 0x25, 0xff, 0x6f, 0xe0, 0x8f, 0x96, 0x3b, 0xc7, 0xfd, - 0x78, 0x2a, 0x5d, 0x26, 0x9e, 0x4e, 0xa5, 0xc5, 0x52, 0xf4, 0x04, 0xf2, 0x54, 0x1b, 0x75, 0x5b, - 0x28, 0x10, 0xdb, 0x5b, 0xad, 0x20, 0x20, 0xb5, 0xb7, 0x5a, 0x98, 0x41, 0xa0, 0x5d, 0x28, 0xb2, - 0xec, 0xc3, 0x96, 0x60, 0x7e, 0xf4, 0x25, 0xcd, 0xfa, 0x1f, 0x4c, 0x08, 0xf6, 0x65, 0x63, 0x17, - 0x47, 0xfe, 0x3e, 0x4c, 0x47, 0xd6, 0x29, 0xfa, 0x0c, 0x6e, 0x6a, 0x86, 0xd2, 0x6d, 0x28, 0x9a, - 0xa2, 0x77, 0x88, 0x77, 0x6a, 0x7f, 0x37, 0x6d, 0x87, 0xb1, 0x15, 0xe2, 0x13, 0xab, 0xdc, 0xbf, - 0x7b, 0x0b, 0xd3, 0x70, 0x04, 0x51, 0x56, 0x00, 0x82, 0x3e, 0xa2, 0x2a, 0x14, 0xd9, 0x3c, 0x73, - 0xf3, 0xc9, 0x64, 0x63, 0x92, 0x59, 0xc8, 0xa6, 0x9f, 0x8d, 0xdd, 0x76, 0xb4, 0x0a, 0x60, 0x93, - 0x8e, 0x45, 0x28, 0x0f, 0x06, 0xb9, 0xe8, 0x0d, 0x64, 0xcb, 0xa7, 0xe0, 0x10, 0x97, 0xfc, 0x2f, - 0x12, 0x4c, 0xef, 0x10, 0xfa, 0x03, 0xc3, 0x3a, 0x6e, 0xf2, 0xcb, 0xe2, 0x6b, 0x08, 0xb6, 0x38, - 0x12, 0x6c, 0xdf, 0x1b, 0x32, 0x32, 0x11, 0xeb, 0xb2, 0x42, 0xae, 0xfc, 0x33, 0x09, 0x96, 0x22, - 0x9c, 0x0f, 0x83, 0xa5, 0xbb, 0x07, 0x45, 0xd3, 0xb0, 0xa8, 0x97, 0x88, 0xc7, 0x52, 0xc8, 0xc2, - 0x58, 0x28, 0x15, 0x33, 0x18, 0xec, 0xa2, 0xa1, 0x2d, 0xc8, 0x51, 0x43, 0x4c, 0xd5, 0xf1, 0x30, - 0x09, 0xb1, 0x1a, 0x20, 0x30, 0x73, 0x6d, 0x03, 0xe7, 0xa8, 0xc1, 0x06, 0xa2, 0x12, 0xe1, 0x0a, - 0x07, 0x9f, 0x2b, 0xea, 0x01, 0x86, 0xc2, 0xa1, 0x65, 0xf4, 0x2f, 0xdd, 0x07, 0x7f, 0x20, 0x1e, - 0x59, 0x46, 0x1f, 0x73, 0x2c, 0xf9, 0xe7, 0x12, 0xcc, 0x47, 0x38, 0xaf, 0x21, 0xf0, 0x7f, 0x1a, - 0x0d, 0xfc, 0xef, 0x8c, 0xd3, 0x91, 0x8c, 0xf0, 0xff, 0xf3, 0x5c, 0xac, 0x1b, 0xac, 0xc3, 0xe8, - 0x10, 0xa6, 0x4c, 0xa3, 0xdb, 0x7a, 0x05, 0xf7, 0x74, 0xb3, 0x2c, 0x6f, 0x36, 0x03, 0x2c, 0x1c, - 0x06, 0x46, 0xa7, 0x30, 0xaf, 0x2b, 0x7d, 0x62, 0x9b, 0x4a, 0x87, 0xb4, 0x5e, 0xc1, 0x01, 0xc9, - 0x6b, 0xfc, 0x22, 0x20, 0x8e, 0x88, 0x93, 0x4a, 0xd0, 0x36, 0x94, 0x54, 0x93, 0xd7, 0x71, 0xa2, - 0x76, 0x19, 0x9a, 0x45, 0xdd, 0xaa, 0xcf, 0x8d, 0xe7, 0xe2, 0x03, 0x7b, 0x18, 0xf2, 0xdf, 0xc5, - 0x67, 0x03, 0x9b, 0x7f, 0xe8, 0x31, 0x94, 0xf9, 0xb3, 0x8b, 0x8e, 0xa1, 0x79, 0x37, 0x03, 0x6c, - 0x64, 0x9b, 0xa2, 0xed, 0xe5, 0x59, 0xf5, 0xf5, 0x94, 0x43, 0x5f, 0x8f, 0x8c, 0x7d, 0x61, 0xb4, - 0x03, 0x05, 0xf3, 0xab, 0x54, 0x30, 0x3c, 0xc9, 0xf1, 0xb2, 0x85, 0xe3, 0xc8, 0x7f, 0x9c, 0x8f, - 0x99, 0xcb, 0x53, 0xdd, 0xf3, 0x57, 0x36, 0xea, 0x7e, 0xc5, 0x94, 0x39, 0xf2, 0x07, 0x50, 0x12, - 0x19, 0x5e, 0x4c, 0xe6, 0x6f, 0x8c, 0x33, 0x99, 0xc3, 0x59, 0xcc, 0xdf, 0xb0, 0x78, 0x8d, 0x1e, - 0x30, 0xfa, 0x1e, 0x4c, 0x10, 0x57, 0x85, 0x9b, 0x1b, 0x3f, 0x1c, 0x47, 0x45, 0x10, 0x57, 0x83, - 0x42, 0x55, 0xb4, 0x09, 0x54, 0xf4, 0x2d, 0xe6, 0x2f, 0xc6, 0xcb, 0x36, 0x81, 0x76, 0xa5, 0xc0, - 0xd3, 0xd5, 0x6d, 0xb7, 0xdb, 0x7e, 0xf3, 0xcb, 0xb3, 0x2a, 0x04, 0x9f, 0x38, 0x2c, 0x21, 0xff, - 0x9b, 0x04, 0xf3, 0xdc, 0x43, 0x1d, 0xc7, 0x52, 0xe9, 0xe0, 0xda, 0x12, 0xd3, 0x7e, 0x24, 0x31, - 0x7d, 0x30, 0xc4, 0x2d, 0x09, 0x0b, 0x33, 0x93, 0xd3, 0x2f, 0x24, 0x78, 0x2d, 0xc1, 0x7d, 0x0d, - 0x71, 0x71, 0x2f, 0x1a, 0x17, 0xdf, 0x1b, 0xb7, 0x43, 0x19, 0xb1, 0xf1, 0xf3, 0x9b, 0x29, 0xdd, - 0xe1, 0x2b, 0x65, 0x15, 0xc0, 0xb4, 0xd4, 0x13, 0x55, 0x23, 0x3d, 0x71, 0x3b, 0x5d, 0x0e, 0xbd, - 0x81, 0xf2, 0x29, 0x38, 0xc4, 0x85, 
0x6c, 0x58, 0xec, 0x92, 0x43, 0xc5, 0xd1, 0xe8, 0x5a, 0xb7, - 0xbb, 0xae, 0x98, 0xca, 0x81, 0xaa, 0xa9, 0x54, 0x15, 0xc7, 0x05, 0x93, 0x8d, 0x4f, 0xdc, 0x5b, - 0xe3, 0x34, 0x8e, 0x97, 0x67, 0xd5, 0xdb, 0x69, 0xb7, 0x43, 0x1e, 0xcb, 0x00, 0x67, 0x40, 0xa3, - 0x01, 0x54, 0x2c, 0xf2, 0x7d, 0x47, 0xb5, 0x48, 0x77, 0xc3, 0x32, 0xcc, 0x88, 0xda, 0x3c, 0x57, - 0xfb, 0xdb, 0xe7, 0x67, 0xd5, 0x0a, 0xce, 0xe0, 0x19, 0xae, 0x38, 0x13, 0x1e, 0x3d, 0x87, 0x05, - 0xc5, 0x7d, 0x3a, 0x16, 0xd1, 0xea, 0xae, 0x92, 0x8f, 0xce, 0xcf, 0xaa, 0x0b, 0x6b, 0x49, 0xf2, - 0x70, 0x85, 0x69, 0xa0, 0xa8, 0x0e, 0xa5, 0x13, 0x43, 0x73, 0xfa, 0xc4, 0xae, 0x14, 0x39, 0x3e, - 0x4b, 0x04, 0xa5, 0x7d, 0xb7, 0xe9, 0xe5, 0x59, 0x75, 0xe2, 0x51, 0x8b, 0xaf, 0x3e, 0x8f, 0x8b, - 0x6d, 0x28, 0x59, 0x2d, 0x29, 0x56, 0x3c, 0x3f, 0x31, 0x2e, 0x07, 0x51, 0xeb, 0x49, 0x40, 0xc2, - 0x61, 0x3e, 0xf4, 0x0c, 0x26, 0x8f, 0xc4, 0xa9, 0x84, 0x5d, 0x29, 0x8d, 0x94, 0x84, 0x23, 0xa7, - 0x18, 0x8d, 0x79, 0xa1, 0x62, 0xd2, 0x6b, 0xb6, 0x71, 0x80, 0x88, 0xde, 0x82, 0x12, 0xff, 0xd8, - 0xdc, 0xe0, 0xc7, 0x71, 0xe5, 0x20, 0xb6, 0x3d, 0x71, 0x9b, 0xb1, 0x47, 0xf7, 0x58, 0x37, 0x9b, - 0xeb, 0xfc, 0x58, 0x38, 0xc6, 0xba, 0xd9, 0x5c, 0xc7, 0x1e, 0x1d, 0x7d, 0x06, 0x25, 0x9b, 0x6c, - 0xa9, 0xba, 0x73, 0x5a, 0x81, 0x91, 0x2e, 0x95, 0x5b, 0x0f, 0x39, 0x77, 0xec, 0x60, 0x2c, 0xd0, - 0x20, 0xe8, 0xd8, 0x83, 0x45, 0x47, 0x30, 0x69, 0x39, 0xfa, 0x9a, 0xbd, 0x67, 0x13, 0xab, 0x32, - 0xc5, 0x75, 0x0c, 0x0b, 0xe7, 0xd8, 0xe3, 0x8f, 0x6b, 0xf1, 0x3d, 0xe4, 0x73, 0xe0, 0x00, 0x1c, - 0xfd, 0xa9, 0x04, 0xc8, 0x76, 0x4c, 0x53, 0x23, 0x7d, 0xa2, 0x53, 0x45, 0xe3, 0x67, 0x71, 0x76, - 0xe5, 0x26, 0xd7, 0xf9, 0x3b, 0xc3, 0xfa, 0x95, 0x10, 0x8c, 0x2b, 0xf7, 0x0f, 0xbd, 0x93, 0xac, - 0x38, 0x45, 0x2f, 0x73, 0xed, 0xa1, 0xcd, 0x7f, 0x57, 0xa6, 0x47, 0x72, 0x6d, 0xfa, 0x99, 0x63, - 0xe0, 0x5a, 0x41, 0xc7, 0x1e, 0x2c, 0xda, 0x87, 0x45, 0x8b, 0x28, 0xdd, 0x5d, 0x5d, 0x1b, 0x60, - 0xc3, 0xa0, 0x8f, 0x54, 0x8d, 0xd8, 0x03, 0x9b, 0x92, 0x7e, 0x65, 0x86, 0x0f, 0xbb, 0xff, 0x28, - 0x03, 0xa7, 0x72, 0xe1, 0x0c, 0x69, 0xd4, 0x87, 0xaa, 0x17, 0x32, 0xd8, 0x7a, 0xf2, 0x63, 0xd6, - 0x43, 0xbb, 0xa3, 0x68, 0xee, 0x3d, 0xc0, 0x2c, 0x57, 0xf0, 0xe6, 0xf9, 0x59, 0xb5, 0xba, 0x71, - 0x31, 0x2b, 0x1e, 0x86, 0x85, 0xbe, 0x0b, 0x15, 0x25, 0x4b, 0xcf, 0x1c, 0xd7, 0xf3, 0x06, 0x8b, - 0x43, 0x99, 0x0a, 0x32, 0xa5, 0x11, 0x85, 0x39, 0x25, 0xfa, 0x42, 0xd5, 0xae, 0xcc, 0x8f, 0x74, - 0x10, 0x19, 0x7b, 0xd8, 0x1a, 0x1c, 0x46, 0xc4, 0x08, 0x36, 0x4e, 0x68, 0xe0, 0xcf, 0x27, 0xc4, - 0x61, 0xfa, 0xf5, 0xbc, 0x57, 0x1c, 0xef, 0xf9, 0x44, 0x60, 0xda, 0x2b, 0x7b, 0x3e, 0x11, 0x82, - 0xbc, 0xf8, 0xf8, 0xee, 0xbf, 0x73, 0xb0, 0x10, 0x30, 0x8f, 0xfc, 0x7c, 0x22, 0x45, 0xe4, 0xca, - 0x9e, 0x4f, 0xa4, 0xbf, 0x3f, 0xc8, 0x5f, 0xf5, 0xfb, 0x83, 0x2b, 0x78, 0xb6, 0xc1, 0x9f, 0x34, - 0x04, 0xae, 0xfb, 0xff, 0xf7, 0xa4, 0x21, 0xb0, 0x2d, 0xa3, 0xc8, 0xfa, 0x87, 0x5c, 0xb8, 0x03, - 0xbf, 0xf6, 0xf7, 0xea, 0x5f, 0xfd, 0x51, 0xa7, 0xfc, 0x8b, 0x3c, 0xcc, 0xc5, 0x57, 0x63, 0xe4, - 0xfa, 0x55, 0x1a, 0x7a, 0xfd, 0xda, 0x84, 0x5b, 0x87, 0x8e, 0xa6, 0x0d, 0xb8, 0x1b, 0x42, 0x77, - 0xb0, 0xee, 0xf5, 0xc9, 0x1b, 0x42, 0xf2, 0xd6, 0xa3, 0x14, 0x1e, 0x9c, 0x2a, 0x99, 0x71, 0x95, - 0x9c, 0xbf, 0xd4, 0x55, 0x72, 0xe2, 0x66, 0xb3, 0x30, 0xc6, 0xcd, 0x66, 0xea, 0xb5, 0x70, 0xf1, - 0x12, 0xd7, 0xc2, 0x97, 0xb9, 0xc7, 0x4d, 0x09, 0x62, 0xc3, 0xee, 0x71, 0xe5, 0x37, 0x60, 0x59, - 0x88, 0x51, 0x7e, 0xc5, 0xaa, 0x53, 0xcb, 0xd0, 0x34, 0x62, 0x6d, 0x38, 0xfd, 0xfe, 0x40, 0xfe, - 0x26, 0xcc, 0x44, 0x1f, 0x0f, 0xb8, 0x23, 0xed, 0xbe, 0x5f, 
0x10, 0x97, 0x58, 0xa1, 0x91, 0x76, - 0xdb, 0xb1, 0xcf, 0x21, 0xff, 0x50, 0x82, 0xc5, 0xf4, 0x47, 0x82, 0x48, 0x83, 0x99, 0xbe, 0x72, - 0x1a, 0x7e, 0x51, 0x29, 0x5d, 0xf2, 0x78, 0x81, 0xdf, 0x1a, 0x6f, 0x47, 0xb0, 0x70, 0x0c, 0x5b, - 0xfe, 0x52, 0x82, 0xa5, 0x8c, 0xfb, 0xda, 0xeb, 0xb5, 0x04, 0x3d, 0x85, 0x72, 0x5f, 0x39, 0x6d, - 0x39, 0x56, 0x8f, 0x5c, 0xfa, 0x40, 0x85, 0x47, 0x8c, 0x6d, 0x81, 0x82, 0x7d, 0x3c, 0xf9, 0xc7, - 0x12, 0x54, 0xb2, 0x4a, 0x5b, 0xf4, 0x20, 0x72, 0xb3, 0xfc, 0xf5, 0xd8, 0xcd, 0xf2, 0x7c, 0x42, - 0xee, 0x8a, 0xee, 0x95, 0xff, 0x5e, 0x82, 0xc5, 0xf4, 0x12, 0x1f, 0xbd, 0x1f, 0xb1, 0xb0, 0x1a, - 0xb3, 0x70, 0x36, 0x26, 0x25, 0xec, 0xfb, 0x1e, 0xcc, 0x88, 0x8d, 0x80, 0x80, 0x11, 0x5e, 0x95, - 0xd3, 0x62, 0xa5, 0x80, 0xf0, 0x0a, 0x5f, 0x3e, 0x5e, 0xd1, 0x36, 0x1c, 0x43, 0x93, 0xff, 0x24, - 0x07, 0xc5, 0x56, 0x47, 0xd1, 0xc8, 0x35, 0x94, 0x59, 0xdf, 0x8e, 0x94, 0x59, 0xc3, 0xfe, 0xfd, - 0xc0, 0xad, 0xca, 0xac, 0xb0, 0x70, 0xac, 0xc2, 0x7a, 0x7b, 0x24, 0xb4, 0x8b, 0x8b, 0xab, 0xdf, - 0x82, 0x49, 0x5f, 0xe9, 0x78, 0x31, 0x5f, 0xfe, 0x9b, 0x1c, 0x4c, 0x85, 0x54, 0x8c, 0x99, 0x31, - 0x0e, 0x23, 0x99, 0x76, 0x94, 0xff, 0x41, 0x85, 0x74, 0xd5, 0xbc, 0xdc, 0xea, 0x3e, 0x12, 0x0c, - 0x9e, 0x85, 0x25, 0x53, 0xee, 0x37, 0x61, 0x86, 0xf2, 0xff, 0x09, 0xf9, 0xc7, 0x90, 0x79, 0x3e, - 0x17, 0xfd, 0xa7, 0xa5, 0xed, 0x08, 0x15, 0xc7, 0xb8, 0x97, 0x3f, 0x81, 0xe9, 0x88, 0xb2, 0xb1, - 0xde, 0xf8, 0xfd, 0xa3, 0x04, 0x5f, 0x1f, 0xba, 0x49, 0x44, 0x8d, 0xc8, 0x22, 0xa9, 0xc5, 0x16, - 0xc9, 0x4a, 0x36, 0xc0, 0x15, 0xbe, 0x15, 0xf9, 0x61, 0x0e, 0x50, 0xfb, 0x48, 0xb5, 0xba, 0x4d, - 0xc5, 0xa2, 0x03, 0x2c, 0xfe, 0xec, 0x75, 0x0d, 0x0b, 0xe6, 0x01, 0x4c, 0x75, 0x89, 0xdd, 0xb1, - 0x54, 0xee, 0x1c, 0x51, 0x9d, 0xfb, 0x07, 0x29, 0x1b, 0x01, 0x09, 0x87, 0xf9, 0xd0, 0x77, 0xa0, - 0x7c, 0xe2, 0xfe, 0x09, 0xd1, 0x3b, 0x9c, 0x1d, 0x56, 0x48, 0x06, 0x7f, 0x5b, 0x0c, 0xe6, 0x8f, - 0x68, 0xb0, 0xb1, 0x0f, 0x26, 0x7f, 0x2e, 0xc1, 0x62, 0xd2, 0x11, 0x1b, 0xcc, 0xd4, 0xab, 0x77, - 0xc6, 0x1b, 0x50, 0xe0, 0xe8, 0xcc, 0x0b, 0x37, 0xdd, 0x43, 0x77, 0xa6, 0x19, 0xf3, 0x56, 0xf9, - 0x3f, 0x24, 0x58, 0x4e, 0x37, 0xed, 0x1a, 0xca, 0xf6, 0xa7, 0xd1, 0xb2, 0x7d, 0xd8, 0x39, 0x45, - 0xba, 0x9d, 0x19, 0x25, 0xfc, 0xbf, 0xa7, 0xfa, 0xfc, 0x1a, 0x3a, 0xb5, 0x1f, 0xed, 0xd4, 0xfd, - 0xb1, 0x3b, 0x95, 0xde, 0xa1, 0xc6, 0xbb, 0x2f, 0xbe, 0x58, 0xb9, 0xf1, 0xcb, 0x2f, 0x56, 0x6e, - 0xfc, 0xea, 0x8b, 0x95, 0x1b, 0x7f, 0x74, 0xbe, 0x22, 0xbd, 0x38, 0x5f, 0x91, 0x7e, 0x79, 0xbe, - 0x22, 0xfd, 0xea, 0x7c, 0x45, 0xfa, 0xaf, 0xf3, 0x15, 0xe9, 0x2f, 0xbe, 0x5c, 0xb9, 0xf1, 0xb4, - 0x24, 0x70, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x55, 0xc4, 0x0b, 0x53, 0x44, 0x3d, 0x00, 0x00, + // 3571 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x1c, 0x47, + 0x76, 0x57, 0xcf, 0x0c, 0x39, 0xc3, 0x47, 0xf1, 0xab, 0x28, 0x93, 0x63, 0xca, 0xe2, 0xc8, 0x6d, + 0x40, 0x91, 0x1c, 0x69, 0xc6, 0x92, 0x2d, 0x59, 0xb1, 0x10, 0x3b, 0x1c, 0x52, 0x1f, 0x74, 0xf8, + 0xa5, 0x1a, 0x52, 0x71, 0x8c, 0xc8, 0x71, 0x73, 0xa6, 0x38, 0x6c, 0xb1, 0xa7, 0xbb, 0xdd, 0x5d, + 0x4d, 0x73, 0x80, 0x20, 0xc8, 0x21, 0x08, 0x10, 0x20, 0x41, 0x92, 0x83, 0xf3, 0x71, 0x8b, 0x2f, + 0x39, 0x25, 0x48, 0x6e, 0xc9, 0xc1, 0x30, 0x10, 0xc0, 0x0b, 0x08, 0x0b, 0x2f, 0xe0, 0xdb, 0xfa, + 0x44, 0xac, 0xe9, 0xd3, 0x62, 0xff, 0x81, 0x85, 0x0e, 0x8b, 0x45, 0x55, 0x57, 0x7f, 0x77, 0x73, + 0x66, 0x68, 0x89, 0x58, 0x2c, 0xf6, 0xc6, 0xa9, 0xf7, 0xde, 0xef, 0xbd, 0x7a, 0xf5, 0xea, 0xbd, + 0xd7, 0x55, 0x45, 0xb8, 0xb7, 0x77, 
0xdb, 0xae, 0xaa, 0x46, 0x6d, 0xcf, 0xd9, 0x26, 0x96, 0x4e, + 0x28, 0xb1, 0x6b, 0xfb, 0x44, 0x6f, 0x19, 0x56, 0x4d, 0x10, 0x14, 0x53, 0xad, 0x91, 0x03, 0x4a, + 0x74, 0x5b, 0x35, 0x74, 0xbb, 0xb6, 0x7f, 0x7d, 0x9b, 0x50, 0xe5, 0x7a, 0xad, 0x4d, 0x74, 0x62, + 0x29, 0x94, 0xb4, 0xaa, 0xa6, 0x65, 0x50, 0x03, 0x5d, 0x70, 0xd9, 0xab, 0x8a, 0xa9, 0x56, 0x03, + 0xf6, 0xaa, 0x60, 0x9f, 0xbb, 0xd6, 0x56, 0xe9, 0xae, 0xb3, 0x5d, 0x6d, 0x1a, 0x9d, 0x5a, 0xdb, + 0x68, 0x1b, 0x35, 0x2e, 0xb5, 0xed, 0xec, 0xf0, 0x5f, 0xfc, 0x07, 0xff, 0xcb, 0x45, 0x9b, 0x93, + 0x43, 0xca, 0x9b, 0x86, 0x45, 0x6a, 0xfb, 0x09, 0x8d, 0x73, 0x57, 0x42, 0x3c, 0xa6, 0xa1, 0xa9, + 0xcd, 0x6e, 0x96, 0x71, 0x73, 0x6f, 0x05, 0xac, 0x1d, 0xa5, 0xb9, 0xab, 0xea, 0xc4, 0xea, 0xd6, + 0xcc, 0xbd, 0x36, 0x97, 0xb5, 0x88, 0x6d, 0x38, 0x56, 0x93, 0x0c, 0x24, 0x65, 0xd7, 0x3a, 0x84, + 0x2a, 0x69, 0x66, 0xd5, 0xb2, 0xa4, 0x2c, 0x47, 0xa7, 0x6a, 0x27, 0xa9, 0xe6, 0x56, 0x2f, 0x01, + 0xbb, 0xb9, 0x4b, 0x3a, 0x4a, 0x42, 0xee, 0xcd, 0x2c, 0x39, 0x87, 0xaa, 0x5a, 0x4d, 0xd5, 0xa9, + 0x4d, 0xad, 0xb8, 0x90, 0x7c, 0x07, 0xa6, 0x16, 0x34, 0xcd, 0xf8, 0x94, 0xb4, 0xee, 0x69, 0xe4, + 0xe0, 0x91, 0xa1, 0x39, 0x1d, 0x82, 0x2e, 0xc1, 0x70, 0xcb, 0x52, 0xf7, 0x89, 0x55, 0x96, 0x2e, + 0x4a, 0x97, 0x47, 0xea, 0xe3, 0x4f, 0x0f, 0x2b, 0x67, 0x8e, 0x0e, 0x2b, 0xc3, 0x4b, 0x7c, 0x14, + 0x0b, 0xaa, 0x7c, 0x17, 0x26, 0x84, 0xf0, 0x03, 0xc3, 0xa6, 0x1b, 0x0a, 0xdd, 0x45, 0x37, 0x00, + 0x4c, 0x85, 0xee, 0x6e, 0x58, 0x64, 0x47, 0x3d, 0x10, 0xe2, 0x48, 0x88, 0xc3, 0x86, 0x4f, 0xc1, + 0x21, 0x2e, 0xf9, 0xdf, 0x24, 0x78, 0x79, 0xd1, 0xb1, 0xa9, 0xd1, 0x59, 0x25, 0xd4, 0x52, 0x9b, + 0x8b, 0x8e, 0x65, 0x11, 0x9d, 0x36, 0xa8, 0x42, 0x1d, 0x1b, 0x5d, 0x84, 0x82, 0xae, 0x74, 0x88, + 0xc0, 0x3a, 0x2b, 0xb0, 0x0a, 0x6b, 0x4a, 0x87, 0x60, 0x4e, 0x41, 0x1f, 0xc2, 0xd0, 0xbe, 0xa2, + 0x39, 0xa4, 0x9c, 0xbb, 0x28, 0x5d, 0x1e, 0xbd, 0x51, 0xad, 0x06, 0xa1, 0xe7, 0x3b, 0xa2, 0x6a, + 0xee, 0xb5, 0x79, 0x2c, 0x7a, 0xab, 0x5b, 0x7d, 0xe8, 0x28, 0x3a, 0x55, 0x69, 0xb7, 0x7e, 0x4e, + 0x40, 0x9e, 0x15, 0x7a, 0x1f, 0x31, 0x2c, 0xec, 0x42, 0xca, 0x7f, 0x09, 0x17, 0x32, 0x4d, 0x5b, + 0x51, 0x6d, 0x8a, 0x1e, 0xc3, 0x90, 0x4a, 0x49, 0xc7, 0x2e, 0x4b, 0x17, 0xf3, 0x97, 0x47, 0x6f, + 0xdc, 0xae, 0x1e, 0x1b, 0xf7, 0xd5, 0x4c, 0xb0, 0xfa, 0x98, 0x30, 0x63, 0x68, 0x99, 0xc1, 0x61, + 0x17, 0x55, 0xfe, 0x27, 0x09, 0x50, 0x58, 0x66, 0x53, 0xb1, 0xda, 0x84, 0xf6, 0xe1, 0x94, 0x3f, + 0xfd, 0x61, 0x4e, 0x99, 0x16, 0x90, 0xa3, 0xae, 0xc2, 0x88, 0x4f, 0x4c, 0x98, 0x49, 0x9a, 0xc4, + 0x9d, 0xf1, 0x28, 0xea, 0x8c, 0xeb, 0x03, 0x38, 0xc3, 0x45, 0xc9, 0xf0, 0xc2, 0x67, 0x39, 0x18, + 0x59, 0x52, 0x48, 0xc7, 0xd0, 0x1b, 0x84, 0xa2, 0x8f, 0xa1, 0xc4, 0x36, 0x5b, 0x4b, 0xa1, 0x0a, + 0x77, 0xc0, 0xe8, 0x8d, 0x37, 0x8e, 0x9b, 0x9d, 0x5d, 0x65, 0xdc, 0xd5, 0xfd, 0xeb, 0xd5, 0xf5, + 0xed, 0x27, 0xa4, 0x49, 0x57, 0x09, 0x55, 0x82, 0x98, 0x0c, 0xc6, 0xb0, 0x8f, 0x8a, 0xd6, 0xa0, + 0x60, 0x9b, 0xa4, 0x29, 0x7c, 0x77, 0xb5, 0xc7, 0x34, 0x7c, 0xcb, 0x1a, 0x26, 0x69, 0x06, 0x8b, + 0xc1, 0x7e, 0x61, 0x8e, 0x83, 0x1e, 0xc1, 0xb0, 0xcd, 0x57, 0xb9, 0x9c, 0x4f, 0xac, 0xc6, 0xf1, + 0x88, 0x6e, 0x6c, 0xf8, 0x1b, 0xd0, 0xfd, 0x8d, 0x05, 0x9a, 0xfc, 0xf3, 0x1c, 0x20, 0x9f, 0x77, + 0xd1, 0xd0, 0x5b, 0x2a, 0x55, 0x0d, 0x1d, 0xbd, 0x03, 0x05, 0xda, 0x35, 0xbd, 0xe8, 0xb8, 0xe4, + 0x19, 0xb4, 0xd9, 0x35, 0xc9, 0xb3, 0xc3, 0xca, 0x4c, 0x52, 0x82, 0x51, 0x30, 0x97, 0x41, 0x2b, + 0xbe, 0xa9, 0x39, 0x2e, 0xfd, 0x56, 0x54, 0xf5, 0xb3, 0xc3, 0x4a, 0x4a, 0x2e, 0xae, 0xfa, 0x48, + 0x51, 0x03, 0xd1, 0x3e, 0x20, 0x4d, 0xb1, 0xe9, 0xa6, 0xa5, 
0xe8, 0xb6, 0xab, 0x49, 0xed, 0x10, + 0xe1, 0x84, 0xd7, 0xfb, 0x5b, 0x34, 0x26, 0x51, 0x9f, 0x13, 0x56, 0xa0, 0x95, 0x04, 0x1a, 0x4e, + 0xd1, 0xc0, 0x32, 0x98, 0x45, 0x14, 0xdb, 0xd0, 0xcb, 0x85, 0x68, 0x06, 0xc3, 0x7c, 0x14, 0x0b, + 0x2a, 0xba, 0x02, 0xc5, 0x0e, 0xb1, 0x6d, 0xa5, 0x4d, 0xca, 0x43, 0x9c, 0x71, 0x42, 0x30, 0x16, + 0x57, 0xdd, 0x61, 0xec, 0xd1, 0xe5, 0x2f, 0x24, 0x18, 0xf3, 0x3d, 0xc7, 0xa3, 0xfd, 0xcf, 0x12, + 0x71, 0x58, 0xed, 0x6f, 0x4a, 0x4c, 0x9a, 0x47, 0xe1, 0xa4, 0xd0, 0x56, 0xf2, 0x46, 0x42, 0x31, + 0xb8, 0xea, 0xed, 0xa5, 0x1c, 0xdf, 0x4b, 0x97, 0xfb, 0x0d, 0x99, 0x8c, 0x2d, 0xf4, 0xcf, 0x85, + 0x90, 0xf9, 0x2c, 0x34, 0xd1, 0x63, 0x28, 0xd9, 0x44, 0x23, 0x4d, 0x6a, 0x58, 0xc2, 0xfc, 0x37, + 0xfb, 0x34, 0x5f, 0xd9, 0x26, 0x5a, 0x43, 0x88, 0xd6, 0xcf, 0x32, 0xfb, 0xbd, 0x5f, 0xd8, 0x87, + 0x44, 0x0f, 0xa1, 0x44, 0x49, 0xc7, 0xd4, 0x14, 0xea, 0xe5, 0xa0, 0xd7, 0xc2, 0x53, 0x60, 0x91, + 0xc3, 0xc0, 0x36, 0x8c, 0xd6, 0xa6, 0x60, 0xe3, 0xdb, 0xc7, 0x77, 0x89, 0x37, 0x8a, 0x7d, 0x18, + 0xb4, 0x0f, 0xe3, 0x8e, 0xd9, 0x62, 0x9c, 0x94, 0xd5, 0xb0, 0x76, 0x57, 0x44, 0xd2, 0xad, 0x7e, + 0x7d, 0xb3, 0x15, 0x91, 0xae, 0xcf, 0x08, 0x5d, 0xe3, 0xd1, 0x71, 0x1c, 0xd3, 0x82, 0x16, 0x60, + 0xa2, 0xa3, 0xea, 0x98, 0x28, 0xad, 0x6e, 0x83, 0x34, 0x0d, 0xbd, 0x65, 0xf3, 0xb0, 0x1a, 0xaa, + 0xcf, 0x0a, 0x80, 0x89, 0xd5, 0x28, 0x19, 0xc7, 0xf9, 0xd1, 0xfb, 0x80, 0xbc, 0x69, 0xdc, 0x77, + 0x4b, 0xb0, 0x6a, 0xe8, 0x3c, 0xe6, 0xf2, 0x41, 0x70, 0x6f, 0x26, 0x38, 0x70, 0x8a, 0x14, 0x5a, + 0x81, 0x73, 0x16, 0xd9, 0x57, 0xd9, 0x1c, 0x1f, 0xa8, 0x36, 0x35, 0xac, 0xee, 0x8a, 0xda, 0x51, + 0x69, 0x79, 0x98, 0xdb, 0x54, 0x3e, 0x3a, 0xac, 0x9c, 0xc3, 0x29, 0x74, 0x9c, 0x2a, 0x25, 0xff, + 0xcb, 0x30, 0x4c, 0xc4, 0xf2, 0x0d, 0x7a, 0x04, 0x33, 0x4d, 0xb7, 0x38, 0xad, 0x39, 0x9d, 0x6d, + 0x62, 0x35, 0x9a, 0xbb, 0xa4, 0xe5, 0x68, 0xa4, 0xc5, 0x03, 0x65, 0xa8, 0x3e, 0x2f, 0x2c, 0x9e, + 0x59, 0x4c, 0xe5, 0xc2, 0x19, 0xd2, 0xcc, 0x0b, 0x3a, 0x1f, 0x5a, 0x55, 0x6d, 0xdb, 0xc7, 0xcc, + 0x71, 0x4c, 0xdf, 0x0b, 0x6b, 0x09, 0x0e, 0x9c, 0x22, 0xc5, 0x6c, 0x6c, 0x11, 0x5b, 0xb5, 0x48, + 0x2b, 0x6e, 0x63, 0x3e, 0x6a, 0xe3, 0x52, 0x2a, 0x17, 0xce, 0x90, 0x46, 0x37, 0x61, 0xd4, 0xd5, + 0xc6, 0xd7, 0x4f, 0x2c, 0xb4, 0x5f, 0x0e, 0xd7, 0x02, 0x12, 0x0e, 0xf3, 0xb1, 0xa9, 0x19, 0xdb, + 0x36, 0xb1, 0xf6, 0x49, 0x2b, 0x7b, 0x81, 0xd7, 0x13, 0x1c, 0x38, 0x45, 0x8a, 0x4d, 0xcd, 0x8d, + 0xc0, 0xc4, 0xd4, 0x86, 0xa3, 0x53, 0xdb, 0x4a, 0xe5, 0xc2, 0x19, 0xd2, 0x2c, 0x8e, 0x5d, 0x93, + 0x17, 0xf6, 0x15, 0x55, 0x53, 0xb6, 0x35, 0x52, 0x2e, 0x46, 0xe3, 0x78, 0x2d, 0x4a, 0xc6, 0x71, + 0x7e, 0x74, 0x1f, 0xa6, 0xdc, 0xa1, 0x2d, 0x5d, 0xf1, 0x41, 0x4a, 0x1c, 0xe4, 0x65, 0x01, 0x32, + 0xb5, 0x16, 0x67, 0xc0, 0x49, 0x19, 0xf4, 0x0e, 0x8c, 0x37, 0x0d, 0x4d, 0xe3, 0xf1, 0xb8, 0x68, + 0x38, 0x3a, 0x2d, 0x8f, 0x70, 0x14, 0xc4, 0xf6, 0xe3, 0x62, 0x84, 0x82, 0x63, 0x9c, 0x88, 0x00, + 0x34, 0xbd, 0x82, 0x63, 0x97, 0xa1, 0xaf, 0x5e, 0x23, 0x59, 0xf4, 0x82, 0x1e, 0xc0, 0x1f, 0xb2, + 0x71, 0x08, 0x58, 0xfe, 0xb1, 0x04, 0xb3, 0x19, 0xa9, 0x03, 0xbd, 0x17, 0x29, 0xb1, 0xbf, 0x1f, + 0x2b, 0xb1, 0xe7, 0x33, 0xc4, 0x42, 0x75, 0x56, 0x87, 0x31, 0x8b, 0xcd, 0x4a, 0x6f, 0xbb, 0x2c, + 0x22, 0x47, 0xde, 0xec, 0x31, 0x0d, 0x1c, 0x96, 0x09, 0x72, 0xfe, 0xd4, 0xd1, 0x61, 0x65, 0x2c, + 0x42, 0xc3, 0x51, 0x78, 0xf9, 0x5f, 0x73, 0x00, 0x4b, 0xc4, 0xd4, 0x8c, 0x6e, 0x87, 0xe8, 0xa7, + 0xd1, 0x43, 0xad, 0x47, 0x7a, 0xa8, 0x6b, 0xbd, 0x96, 0xc7, 0x37, 0x2d, 0xb3, 0x89, 0xfa, 0x93, + 0x58, 0x13, 0x55, 0xeb, 0x1f, 0xf2, 0xf8, 0x2e, 0xea, 0xa7, 0x79, 0x98, 0x0e, 0x98, 
0x83, 0x36, + 0xea, 0x4e, 0x64, 0x8d, 0x7f, 0x2f, 0xb6, 0xc6, 0xb3, 0x29, 0x22, 0x2f, 0xac, 0x8f, 0x7a, 0xfe, + 0xfd, 0x0c, 0x7a, 0x02, 0xe3, 0xac, 0x71, 0x72, 0xc3, 0x83, 0xb7, 0x65, 0xc3, 0x03, 0xb7, 0x65, + 0x7e, 0x01, 0x5d, 0x89, 0x20, 0xe1, 0x18, 0x72, 0x46, 0x1b, 0x58, 0x7c, 0xd1, 0x6d, 0xa0, 0xfc, + 0xa5, 0x04, 0xe3, 0xc1, 0x32, 0x9d, 0x42, 0xd3, 0xb6, 0x16, 0x6d, 0xda, 0xae, 0xf4, 0x1d, 0xa2, + 0x19, 0x5d, 0xdb, 0x2f, 0x59, 0x83, 0xef, 0x33, 0xb1, 0x0d, 0xbe, 0xad, 0x34, 0xf7, 0xfa, 0xf8, + 0xfc, 0xfb, 0x4c, 0x02, 0x24, 0xaa, 0xc0, 0x82, 0xae, 0x1b, 0x54, 0x71, 0x73, 0xa5, 0x6b, 0xd6, + 0x72, 0xdf, 0x66, 0x79, 0x1a, 0xab, 0x5b, 0x09, 0xac, 0xbb, 0x3a, 0xb5, 0xba, 0xc1, 0x8a, 0x24, + 0x19, 0x70, 0x8a, 0x01, 0x48, 0x01, 0xb0, 0x04, 0xe6, 0xa6, 0x21, 0x36, 0xf2, 0xb5, 0x3e, 0x72, + 0x1e, 0x13, 0x58, 0x34, 0xf4, 0x1d, 0xb5, 0x1d, 0xa4, 0x1d, 0xec, 0x03, 0xe1, 0x10, 0xe8, 0xdc, + 0x5d, 0x98, 0xcd, 0xb0, 0x16, 0x4d, 0x42, 0x7e, 0x8f, 0x74, 0x5d, 0xb7, 0x61, 0xf6, 0x27, 0x3a, + 0x17, 0xfe, 0x4c, 0x1e, 0x11, 0x5f, 0xb8, 0xef, 0xe4, 0x6e, 0x4b, 0xf2, 0x17, 0x43, 0xe1, 0xd8, + 0xe1, 0x1d, 0xf3, 0x65, 0x28, 0x59, 0xc4, 0xd4, 0xd4, 0xa6, 0x62, 0x8b, 0x46, 0x88, 0x37, 0xbf, + 0x58, 0x8c, 0x61, 0x9f, 0x1a, 0xe9, 0xad, 0x73, 0x2f, 0xb6, 0xb7, 0xce, 0x3f, 0x9f, 0xde, 0xfa, + 0xcf, 0xa1, 0x64, 0x7b, 0x5d, 0x75, 0x81, 0x43, 0x5e, 0x1f, 0x20, 0xbf, 0x8a, 0x86, 0xda, 0x57, + 0xe0, 0xb7, 0xd2, 0x3e, 0x68, 0x5a, 0x13, 0x3d, 0x34, 0x60, 0x13, 0xfd, 0x5c, 0x1b, 0x5f, 0x96, + 0x53, 0x4d, 0xc5, 0xb1, 0x49, 0x8b, 0x27, 0xa2, 0x52, 0x90, 0x53, 0x37, 0xf8, 0x28, 0x16, 0x54, + 0xf4, 0x38, 0x12, 0xb2, 0xa5, 0x93, 0x84, 0xec, 0x78, 0x76, 0xb8, 0xa2, 0x2d, 0x98, 0x35, 0x2d, + 0xa3, 0x6d, 0x11, 0xdb, 0x5e, 0x22, 0x4a, 0x4b, 0x53, 0x75, 0xe2, 0xf9, 0xc7, 0xed, 0x88, 0xce, + 0x1f, 0x1d, 0x56, 0x66, 0x37, 0xd2, 0x59, 0x70, 0x96, 0xac, 0xfc, 0xb4, 0x00, 0x93, 0xf1, 0x0a, + 0x98, 0xd1, 0xa4, 0x4a, 0x27, 0x6a, 0x52, 0xaf, 0x86, 0x36, 0x83, 0xdb, 0xc1, 0xfb, 0xab, 0x9f, + 0xb2, 0x21, 0x16, 0x60, 0x42, 0x64, 0x03, 0x8f, 0x28, 0xda, 0x74, 0x7f, 0xf5, 0xb7, 0xa2, 0x64, + 0x1c, 0xe7, 0x67, 0xad, 0x67, 0xd0, 0x51, 0x7a, 0x20, 0x85, 0x68, 0xeb, 0xb9, 0x10, 0x67, 0xc0, + 0x49, 0x19, 0xb4, 0x0a, 0xd3, 0x8e, 0x9e, 0x84, 0x72, 0xa3, 0xf1, 0xbc, 0x80, 0x9a, 0xde, 0x4a, + 0xb2, 0xe0, 0x34, 0x39, 0xb4, 0x13, 0xe9, 0x46, 0x87, 0x79, 0x86, 0xbd, 0xd1, 0xf7, 0xde, 0xe9, + 0xbb, 0x1d, 0x45, 0x77, 0x60, 0xcc, 0xe2, 0xdf, 0x1d, 0x9e, 0xc1, 0x6e, 0xef, 0xfe, 0x92, 0x10, + 0x1b, 0xc3, 0x61, 0x22, 0x8e, 0xf2, 0xa6, 0xb4, 0xdb, 0xa5, 0x7e, 0xdb, 0x6d, 0xf9, 0xff, 0xa5, + 0x70, 0x11, 0xf2, 0x5b, 0xe0, 0x5e, 0xa7, 0x4c, 0x09, 0x89, 0x50, 0x77, 0x64, 0xa4, 0x77, 0xbf, + 0xb7, 0x06, 0xea, 0x7e, 0x83, 0xe2, 0xd9, 0xbb, 0xfd, 0xfd, 0x5c, 0x82, 0x99, 0x7b, 0x8d, 0xfb, + 0x96, 0xe1, 0x98, 0x9e, 0x39, 0xeb, 0xa6, 0xeb, 0xd7, 0xb7, 0xa1, 0x60, 0x39, 0x9a, 0x37, 0x8f, + 0xd7, 0xbc, 0x79, 0x60, 0x47, 0x63, 0xf3, 0x98, 0x8e, 0x49, 0xb9, 0x93, 0x60, 0x02, 0x68, 0x0d, + 0x86, 0x2d, 0x45, 0x6f, 0x13, 0xaf, 0xac, 0x5e, 0xea, 0x61, 0xfd, 0xf2, 0x12, 0x66, 0xec, 0xa1, + 0xe6, 0x8d, 0x4b, 0x63, 0x81, 0x22, 0xff, 0xbd, 0x04, 0x13, 0x0f, 0x36, 0x37, 0x37, 0x96, 0x75, + 0xbe, 0xa3, 0xf9, 0x79, 0xfa, 0x45, 0x28, 0x98, 0x0a, 0xdd, 0x8d, 0x57, 0x7a, 0x46, 0xc3, 0x9c, + 0x82, 0x3e, 0x80, 0x22, 0xcb, 0x24, 0x44, 0x6f, 0xf5, 0xd9, 0x6a, 0x0b, 0xf8, 0xba, 0x2b, 0x14, + 0x74, 0x88, 0x62, 0x00, 0x7b, 0x70, 0xf2, 0x1e, 0x9c, 0x0b, 0x99, 0xc3, 0xfc, 0xc1, 0x8f, 0x81, + 0x51, 0x03, 0x86, 0x98, 0x66, 0xef, 0x94, 0xb7, 0xd7, 0x61, 0x66, 0x6c, 0x4a, 0x41, 0xa7, 0xc3, + 0x7e, 0xd9, 
0xd8, 0xc5, 0x92, 0x57, 0x61, 0x8c, 0x5f, 0x22, 0x18, 0x16, 0xe5, 0x6e, 0x41, 0x17, + 0x20, 0xdf, 0x51, 0x75, 0x51, 0x67, 0x47, 0x85, 0x4c, 0x9e, 0xd5, 0x08, 0x36, 0xce, 0xc9, 0xca, + 0x81, 0xc8, 0x3c, 0x01, 0x59, 0x39, 0xc0, 0x6c, 0x5c, 0xbe, 0x0f, 0x45, 0xe1, 0xee, 0x30, 0x50, + 0xfe, 0x78, 0xa0, 0x7c, 0x0a, 0xd0, 0x3a, 0x14, 0x97, 0x37, 0xea, 0x9a, 0xe1, 0x76, 0x5d, 0x4d, + 0xb5, 0x65, 0xc5, 0xd7, 0x62, 0x71, 0x79, 0x09, 0x63, 0x4e, 0x41, 0x32, 0x0c, 0x93, 0x83, 0x26, + 0x31, 0x29, 0x8f, 0x88, 0x91, 0x3a, 0xb0, 0x55, 0xbe, 0xcb, 0x47, 0xb0, 0xa0, 0xc8, 0xff, 0x90, + 0x83, 0xa2, 0x70, 0xc7, 0x29, 0x7c, 0x85, 0xad, 0x44, 0xbe, 0xc2, 0x5e, 0xef, 0x2f, 0x34, 0x32, + 0x3f, 0xc1, 0x36, 0x63, 0x9f, 0x60, 0x57, 0xfb, 0xc4, 0x3b, 0xfe, 0xfb, 0xeb, 0x7f, 0x24, 0x18, + 0x8f, 0x06, 0x25, 0xba, 0x09, 0xa3, 0xac, 0xe0, 0xa8, 0x4d, 0xb2, 0x16, 0xf4, 0xb9, 0xfe, 0x21, + 0x4c, 0x23, 0x20, 0xe1, 0x30, 0x1f, 0x6a, 0xfb, 0x62, 0x2c, 0x8e, 0xc4, 0xa4, 0xb3, 0x5d, 0xea, + 0x50, 0x55, 0xab, 0xba, 0x17, 0x63, 0xd5, 0x65, 0x9d, 0xae, 0x5b, 0x0d, 0x6a, 0xa9, 0x7a, 0x3b, + 0xa1, 0x88, 0x07, 0x65, 0x18, 0x59, 0xfe, 0x3f, 0x09, 0x46, 0x85, 0xc9, 0xa7, 0xf0, 0x55, 0xf1, + 0xc7, 0xd1, 0xaf, 0x8a, 0x4b, 0x7d, 0x6e, 0xf0, 0xf4, 0x4f, 0x8a, 0xff, 0x08, 0x4c, 0x67, 0x5b, + 0x9a, 0x45, 0xf5, 0xae, 0x61, 0xd3, 0x78, 0x54, 0xb3, 0xcd, 0x88, 0x39, 0x05, 0x39, 0x30, 0xa9, + 0xc6, 0x72, 0x80, 0x70, 0x6d, 0xad, 0x3f, 0x4b, 0x7c, 0xb1, 0x7a, 0x59, 0xc0, 0x4f, 0xc6, 0x29, + 0x38, 0xa1, 0x42, 0x26, 0x90, 0xe0, 0x42, 0x0f, 0xa1, 0xb0, 0x4b, 0xa9, 0x99, 0x72, 0x5e, 0xdd, + 0x23, 0xf3, 0x04, 0x26, 0x94, 0xf8, 0xec, 0x36, 0x37, 0x37, 0x30, 0x87, 0x92, 0x7f, 0x15, 0xf8, + 0xa3, 0xe1, 0xc6, 0xb8, 0x9f, 0x4f, 0xa5, 0x93, 0xe4, 0xd3, 0xd1, 0xb4, 0x5c, 0x8a, 0x1e, 0x40, + 0x9e, 0x6a, 0xfd, 0x7e, 0x16, 0x0a, 0xc4, 0xcd, 0x95, 0x46, 0x90, 0x90, 0x36, 0x57, 0x1a, 0x98, + 0x41, 0xa0, 0x75, 0x18, 0x62, 0xd5, 0x87, 0x6d, 0xc1, 0x7c, 0xff, 0x5b, 0x9a, 0xcd, 0x3f, 0x08, + 0x08, 0xf6, 0xcb, 0xc6, 0x2e, 0x8e, 0xfc, 0x09, 0x8c, 0x45, 0xf6, 0x29, 0xfa, 0x18, 0xce, 0x6a, + 0x86, 0xd2, 0xaa, 0x2b, 0x9a, 0xa2, 0x37, 0x89, 0x77, 0x39, 0x70, 0x29, 0xed, 0x0b, 0x63, 0x25, + 0xc4, 0x27, 0x76, 0xb9, 0x7f, 0x9d, 0x1a, 0xa6, 0xe1, 0x08, 0xa2, 0xac, 0x00, 0x04, 0x73, 0x44, + 0x15, 0x18, 0x62, 0x71, 0xe6, 0xd6, 0x93, 0x91, 0xfa, 0x08, 0xb3, 0x90, 0x85, 0x9f, 0x8d, 0xdd, + 0x71, 0x74, 0x03, 0xc0, 0x26, 0x4d, 0x8b, 0x50, 0x9e, 0x0c, 0x72, 0xd1, 0x4b, 0xe5, 0x86, 0x4f, + 0xc1, 0x21, 0x2e, 0xf9, 0x47, 0x12, 0x8c, 0xad, 0x11, 0xfa, 0xa9, 0x61, 0xed, 0x6d, 0xf0, 0xc7, + 0x00, 0xa7, 0x90, 0x6c, 0x71, 0x24, 0xd9, 0xbe, 0xd1, 0x63, 0x65, 0x22, 0xd6, 0x65, 0xa5, 0x5c, + 0xf9, 0x4b, 0x09, 0x66, 0x23, 0x9c, 0x77, 0x83, 0xad, 0xbb, 0x05, 0x43, 0xa6, 0x61, 0x51, 0xaf, + 0x10, 0x0f, 0xa4, 0x90, 0xa5, 0xb1, 0x50, 0x29, 0x66, 0x30, 0xd8, 0x45, 0x43, 0x2b, 0x90, 0xa3, + 0x86, 0x08, 0xd5, 0xc1, 0x30, 0x09, 0xb1, 0xea, 0x20, 0x30, 0x73, 0x9b, 0x06, 0xce, 0x51, 0x83, + 0x2d, 0x44, 0x39, 0xc2, 0x15, 0x4e, 0x3e, 0x2f, 0x68, 0x06, 0x18, 0x0a, 0x3b, 0x96, 0xd1, 0x39, + 0xf1, 0x1c, 0xfc, 0x85, 0xb8, 0x67, 0x19, 0x1d, 0xcc, 0xb1, 0xe4, 0xaf, 0x24, 0x98, 0x8a, 0x70, + 0x9e, 0x42, 0xe2, 0x7f, 0x18, 0x4d, 0xfc, 0x57, 0x07, 0x99, 0x48, 0x46, 0xfa, 0xff, 0x2a, 0x17, + 0x9b, 0x06, 0x9b, 0x30, 0xda, 0x81, 0x51, 0xd3, 0x68, 0x35, 0x9e, 0xc3, 0x75, 0xe0, 0x04, 0xab, + 0x9b, 0x1b, 0x01, 0x16, 0x0e, 0x03, 0xa3, 0x03, 0x98, 0xd2, 0x95, 0x0e, 0xb1, 0x4d, 0xa5, 0x49, + 0x1a, 0xcf, 0xe1, 0x80, 0xe4, 0x25, 0x7e, 0xdf, 0x10, 0x47, 0xc4, 0x49, 0x25, 0x68, 0x15, 0x8a, + 0xaa, 0xc9, 0xfb, 0x38, 0xd1, 0xbb, 
0xf4, 0xac, 0xa2, 0x6e, 0xd7, 0xe7, 0xe6, 0x73, 0xf1, 0x03, + 0x7b, 0x18, 0xf2, 0x7f, 0xc6, 0xa3, 0x81, 0xc5, 0x1f, 0xba, 0x0f, 0x25, 0xfe, 0xac, 0xa6, 0x69, + 0x68, 0xde, 0xcd, 0x00, 0x5b, 0xd9, 0x0d, 0x31, 0xf6, 0xec, 0xb0, 0x72, 0x3e, 0xe5, 0xd0, 0xd7, + 0x23, 0x63, 0x5f, 0x18, 0xad, 0x41, 0xc1, 0xfc, 0x21, 0x1d, 0x0c, 0x2f, 0x72, 0xbc, 0x6d, 0xe1, + 0x38, 0xf2, 0x5f, 0xe7, 0x63, 0xe6, 0xf2, 0x52, 0xf7, 0xe4, 0xb9, 0xad, 0xba, 0xdf, 0x31, 0x65, + 0xae, 0xfc, 0x36, 0x14, 0x45, 0x85, 0x17, 0xc1, 0xfc, 0xf6, 0x20, 0xc1, 0x1c, 0xae, 0x62, 0xfe, + 0x07, 0x8b, 0x37, 0xe8, 0x01, 0xa3, 0x8f, 0x60, 0x98, 0xb8, 0x2a, 0xdc, 0xda, 0x78, 0x6b, 0x10, + 0x15, 0x41, 0x5e, 0x0d, 0x1a, 0x55, 0x31, 0x26, 0x50, 0xd1, 0x7b, 0xcc, 0x5f, 0x8c, 0x97, 0x7d, + 0x04, 0xda, 0xe5, 0x02, 0x2f, 0x57, 0x17, 0xdc, 0x69, 0xfb, 0xc3, 0xcf, 0x0e, 0x2b, 0x10, 0xfc, + 0xc4, 0x61, 0x09, 0xf9, 0x27, 0x12, 0x4c, 0x71, 0x0f, 0x35, 0x1d, 0x4b, 0xa5, 0xdd, 0x53, 0x2b, + 0x4c, 0x8f, 0x22, 0x85, 0xe9, 0xad, 0x1e, 0x6e, 0x49, 0x58, 0x98, 0x59, 0x9c, 0xbe, 0x96, 0xe0, + 0xa5, 0x04, 0xf7, 0x29, 0xe4, 0xc5, 0xad, 0x68, 0x5e, 0x7c, 0x63, 0xd0, 0x09, 0x65, 0xbd, 0x91, + 0x18, 0x4b, 0x99, 0x0e, 0xdf, 0x29, 0x37, 0x00, 0x4c, 0x4b, 0xdd, 0x57, 0x35, 0xd2, 0x16, 0x97, + 0xe0, 0xa5, 0xd0, 0xb3, 0x36, 0x9f, 0x82, 0x43, 0x5c, 0xc8, 0x86, 0x99, 0x16, 0xd9, 0x51, 0x1c, + 0x8d, 0x2e, 0xb4, 0x5a, 0x8b, 0x8a, 0xa9, 0x6c, 0xab, 0x9a, 0x4a, 0x55, 0x71, 0x5c, 0x30, 0x52, + 0xbf, 0xe3, 0x5e, 0x4e, 0xa7, 0x71, 0x3c, 0x3b, 0xac, 0x5c, 0x48, 0xbb, 0x1d, 0xf2, 0x58, 0xba, + 0x38, 0x03, 0x1a, 0x75, 0xa1, 0x6c, 0x91, 0x4f, 0x1c, 0xd5, 0x22, 0xad, 0x25, 0xcb, 0x30, 0x23, + 0x6a, 0xf3, 0x5c, 0xed, 0x1f, 0x1e, 0x1d, 0x56, 0xca, 0x38, 0x83, 0xa7, 0xb7, 0xe2, 0x4c, 0x78, + 0xf4, 0x04, 0xa6, 0x15, 0xf7, 0x35, 0x60, 0x44, 0xab, 0xbb, 0x4b, 0x6e, 0x1f, 0x1d, 0x56, 0xa6, + 0x17, 0x92, 0xe4, 0xde, 0x0a, 0xd3, 0x40, 0x51, 0x0d, 0x8a, 0xfb, 0xfc, 0xad, 0xa2, 0x5d, 0x1e, + 0xe2, 0xf8, 0xac, 0x10, 0x14, 0xdd, 0xe7, 0x8b, 0x0c, 0x73, 0xf8, 0x5e, 0x83, 0xef, 0x3e, 0x8f, + 0x8b, 0x7d, 0x50, 0xb2, 0x5e, 0x52, 0xec, 0x78, 0x7e, 0x62, 0x5c, 0x0a, 0xb2, 0xd6, 0x83, 0x80, + 0x84, 0xc3, 0x7c, 0xe8, 0x31, 0x8c, 0xec, 0x8a, 0x53, 0x09, 0xbb, 0x5c, 0xec, 0xab, 0x08, 0x47, + 0x4e, 0x31, 0xea, 0x53, 0x42, 0xc5, 0x88, 0x37, 0x6c, 0xe3, 0x00, 0x11, 0x5d, 0x81, 0x22, 0xff, + 0xb1, 0xbc, 0xc4, 0x8f, 0xe3, 0x4a, 0x41, 0x6e, 0x7b, 0xe0, 0x0e, 0x63, 0x8f, 0xee, 0xb1, 0x2e, + 0x6f, 0x2c, 0xf2, 0x63, 0xe1, 0x18, 0xeb, 0xf2, 0xc6, 0x22, 0xf6, 0xe8, 0xe8, 0x63, 0x28, 0xda, + 0x64, 0x45, 0xd5, 0x9d, 0x83, 0x32, 0xf4, 0x75, 0xa9, 0xdc, 0xb8, 0xcb, 0xb9, 0x63, 0x07, 0x63, + 0x81, 0x06, 0x41, 0xc7, 0x1e, 0x2c, 0xda, 0x85, 0x11, 0xcb, 0xd1, 0x17, 0xec, 0x2d, 0x9b, 0x58, + 0xe5, 0x51, 0xae, 0xa3, 0x57, 0x3a, 0xc7, 0x1e, 0x7f, 0x5c, 0x8b, 0xef, 0x21, 0x9f, 0x03, 0x07, + 0xe0, 0xe8, 0xef, 0x24, 0x40, 0xb6, 0x63, 0x9a, 0x1a, 0xe9, 0x10, 0x9d, 0x2a, 0x1a, 0x3f, 0x8b, + 0xb3, 0xcb, 0x67, 0xb9, 0xce, 0x3f, 0xea, 0x35, 0xaf, 0x84, 0x60, 0x5c, 0xb9, 0x7f, 0xe8, 0x9d, + 0x64, 0xc5, 0x29, 0x7a, 0x99, 0x6b, 0x77, 0x6c, 0xfe, 0x77, 0x79, 0xac, 0x2f, 0xd7, 0xa6, 0x9f, + 0x39, 0x06, 0xae, 0x15, 0x74, 0xec, 0xc1, 0xa2, 0x47, 0x30, 0x63, 0x11, 0xa5, 0xb5, 0xae, 0x6b, + 0x5d, 0x6c, 0x18, 0xf4, 0x9e, 0xaa, 0x11, 0xbb, 0x6b, 0x53, 0xd2, 0x29, 0x8f, 0xf3, 0x65, 0xf7, + 0xdf, 0x7e, 0xe0, 0x54, 0x2e, 0x9c, 0x21, 0x8d, 0x3a, 0x50, 0xf1, 0x52, 0x06, 0xdb, 0x4f, 0x7e, + 0xce, 0xba, 0x6b, 0x37, 0x15, 0xcd, 0xbd, 0x07, 0x98, 0xe0, 0x0a, 0x5e, 0x3b, 0x3a, 0xac, 0x54, + 0x96, 0x8e, 0x67, 0xc5, 0xbd, 0xb0, 0xd0, 0x07, 0x50, 0x56, 
0xb2, 0xf4, 0x4c, 0x72, 0x3d, 0xaf, + 0xb0, 0x3c, 0x94, 0xa9, 0x20, 0x53, 0x1a, 0x51, 0x98, 0x54, 0xa2, 0x8f, 0x8e, 0xed, 0xf2, 0x54, + 0x5f, 0x07, 0x91, 0xb1, 0xb7, 0xca, 0xc1, 0x61, 0x44, 0x8c, 0x60, 0xe3, 0x84, 0x06, 0xf4, 0x17, + 0x80, 0x94, 0xf8, 0x3b, 0x69, 0xbb, 0x8c, 0xfa, 0x2a, 0x3f, 0x89, 0x07, 0xd6, 0x41, 0xd8, 0x25, + 0x48, 0x36, 0x4e, 0xd1, 0xc3, 0x1f, 0x6f, 0x88, 0xa3, 0xfc, 0xd3, 0x79, 0x00, 0x3b, 0xd8, 0xe3, + 0x8d, 0xc0, 0xb4, 0xe7, 0xf6, 0x78, 0x23, 0x04, 0x79, 0xfc, 0xe1, 0xe1, 0x2f, 0x72, 0x30, 0x1d, + 0x30, 0xf7, 0xfd, 0x78, 0x23, 0x45, 0xe4, 0x77, 0x8f, 0x60, 0x7b, 0x3f, 0x82, 0xfd, 0x52, 0x82, + 0xf1, 0xc0, 0x75, 0xbf, 0x79, 0x0f, 0x2a, 0x02, 0xdb, 0x32, 0x5a, 0xbc, 0xff, 0xce, 0x85, 0x27, + 0xf0, 0x5b, 0x7f, 0xab, 0xff, 0xc3, 0x5f, 0xae, 0xca, 0x5f, 0xe7, 0x61, 0x32, 0xbe, 0x1b, 0x23, + 0x97, 0xbf, 0x52, 0xcf, 0xcb, 0xdf, 0x0d, 0x38, 0xb7, 0xe3, 0x68, 0x5a, 0x97, 0xbb, 0x21, 0x74, + 0x03, 0xec, 0x5e, 0xde, 0xbc, 0x22, 0x24, 0xcf, 0xdd, 0x4b, 0xe1, 0xc1, 0xa9, 0x92, 0x19, 0x17, + 0xd9, 0xf9, 0x13, 0x5d, 0x64, 0x27, 0xee, 0x55, 0x0b, 0x03, 0xdc, 0xab, 0xa6, 0x5e, 0x4a, 0x0f, + 0x9d, 0xe0, 0x52, 0xfa, 0x24, 0xb7, 0xc8, 0x29, 0x49, 0xac, 0xe7, 0xa3, 0xc6, 0x57, 0x60, 0x4e, + 0x88, 0x51, 0x7e, 0xc1, 0xab, 0x53, 0xcb, 0xd0, 0x34, 0x62, 0x2d, 0x39, 0x9d, 0x4e, 0x57, 0x7e, + 0x17, 0xc6, 0xa3, 0x4f, 0x17, 0xdc, 0x95, 0x76, 0x5f, 0x4f, 0x88, 0x2b, 0xb4, 0xd0, 0x4a, 0xbb, + 0xe3, 0xd8, 0xe7, 0x90, 0xff, 0x46, 0x82, 0x99, 0xf4, 0x27, 0x8a, 0x48, 0x83, 0xf1, 0x8e, 0x72, + 0x10, 0x7e, 0x36, 0x2a, 0x9d, 0xf0, 0x70, 0x83, 0xdf, 0x59, 0xaf, 0x46, 0xb0, 0x70, 0x0c, 0x5b, + 0xfe, 0x5e, 0x82, 0xd9, 0x8c, 0xdb, 0xe2, 0xd3, 0xb5, 0x04, 0x7d, 0x08, 0xa5, 0x8e, 0x72, 0xd0, + 0x70, 0xac, 0x36, 0x39, 0xf1, 0x71, 0x0e, 0xcf, 0x18, 0xab, 0x02, 0x05, 0xfb, 0x78, 0xf2, 0xe7, + 0x12, 0x94, 0xb3, 0x1a, 0x6b, 0x74, 0x33, 0x72, 0xaf, 0xfd, 0x6a, 0xec, 0x5e, 0x7b, 0x2a, 0x21, + 0xf7, 0x82, 0x6e, 0xb5, 0xff, 0x4b, 0x82, 0x99, 0xf4, 0x0f, 0x0c, 0xf4, 0x66, 0xc4, 0xc2, 0x4a, + 0xcc, 0xc2, 0x89, 0x98, 0x94, 0xb0, 0xef, 0x23, 0x18, 0x17, 0x9f, 0x21, 0x02, 0x46, 0x78, 0x55, + 0x4e, 0xcb, 0x95, 0x02, 0xc2, 0x6b, 0xbb, 0xf9, 0x7a, 0x45, 0xc7, 0x70, 0x0c, 0x4d, 0xfe, 0xdb, + 0x1c, 0x0c, 0x35, 0x9a, 0x8a, 0x46, 0x4e, 0xa1, 0xcd, 0x7a, 0x3f, 0xd2, 0x66, 0xf5, 0xfa, 0x17, + 0x0f, 0x6e, 0x55, 0x66, 0x87, 0x85, 0x63, 0x1d, 0xd6, 0xeb, 0x7d, 0xa1, 0x1d, 0xdf, 0x5c, 0xfd, + 0x01, 0x8c, 0xf8, 0x4a, 0x07, 0xcb, 0xf9, 0xf2, 0xbf, 0xe7, 0x60, 0x34, 0xa4, 0x62, 0xc0, 0x8a, + 0xb1, 0x13, 0xa9, 0xb4, 0xfd, 0xfc, 0x63, 0x5d, 0x48, 0x57, 0xd5, 0xab, 0xad, 0xee, 0x13, 0xc5, + 0xe0, 0x51, 0x5a, 0xb2, 0xe4, 0xbe, 0x0b, 0xe3, 0x94, 0xff, 0xe3, 0x99, 0x7f, 0x08, 0x9a, 0xe7, + 0xb1, 0xe8, 0x3f, 0x6c, 0xdd, 0x8c, 0x50, 0x71, 0x8c, 0x7b, 0xee, 0x0e, 0x8c, 0x45, 0x94, 0x0d, + 0xf4, 0xc2, 0xf0, 0x7f, 0x25, 0x78, 0xb5, 0xe7, 0x27, 0x2a, 0xaa, 0x47, 0x36, 0x49, 0x35, 0xb6, + 0x49, 0xe6, 0xb3, 0x01, 0x5e, 0xdc, 0x4b, 0x95, 0xfa, 0xb5, 0xa7, 0xdf, 0xcd, 0x9f, 0xf9, 0xe6, + 0xbb, 0xf9, 0x33, 0xdf, 0x7e, 0x37, 0x7f, 0xe6, 0xaf, 0x8e, 0xe6, 0xa5, 0xa7, 0x47, 0xf3, 0xd2, + 0x37, 0x47, 0xf3, 0xd2, 0xb7, 0x47, 0xf3, 0xd2, 0xcf, 0x8e, 0xe6, 0xa5, 0x7f, 0xfc, 0x7e, 0xfe, + 0xcc, 0x87, 0x45, 0x01, 0xf7, 0xeb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xb3, 0xc8, 0xe2, 0x54, + 0x3c, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index cd4f2a01..7a3e7029 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ 
b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -32,11 +32,10 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// An APIVersion represents a single concrete version of an object model. -message APIVersion { - // Name of this version (e.g. 'v1'). - // +optional - optional string name = 1; +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // Driver is the name of the Flexvolume driver. + optional string driver = 1; } // defines the host volume conditions that will be enabled by a policy @@ -45,7 +44,7 @@ message AllowedHostPath { // is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -100,6 +99,27 @@ message DaemonSet { optional DaemonSetStatus status = 3; } +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. @@ -197,6 +217,12 @@ message DaemonSetStatus { // create the name for the newest ControllerRevision. // +optional optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; } message DaemonSetUpdateStrategy { @@ -441,6 +467,7 @@ message IDRange { optional int64 max = 2; } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should // not be included within this rule. @@ -582,6 +609,7 @@ message IngressTLS { optional string secretName = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. @@ -594,6 +622,7 @@ message NetworkPolicy { optional NetworkPolicySpec spec = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 @@ -615,6 +644,7 @@ message NetworkPolicyEgressRule { repeated NetworkPolicyPeer to = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. 
// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. message NetworkPolicyIngressRule { // List of ports which should be made accessible on the pods selected for this rule. @@ -634,6 +664,7 @@ message NetworkPolicyIngressRule { repeated NetworkPolicyPeer from = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. @@ -645,6 +676,7 @@ message NetworkPolicyList { repeated NetworkPolicy items = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. message NetworkPolicyPeer { // This is a label selector which selects Pods in this namespace. // This field follows standard label selector semantics. @@ -664,6 +696,7 @@ message NetworkPolicyPeer { optional IPBlock ipBlock = 3; } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. message NetworkPolicyPort { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -679,6 +712,7 @@ message NetworkPolicyPort { optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; } +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. message NetworkPolicySpec { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. Multiple network policies can select the @@ -752,8 +786,9 @@ message PodSecurityPolicySpec { optional bool privileged = 1; // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional repeated string defaultAddCapabilities = 2; @@ -822,6 +857,12 @@ message PodSecurityPolicySpec { // is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional repeated AllowedHostPath allowedHostPaths = 17; + + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -1074,50 +1115,3 @@ message SupplementalGroupsStrategyOptions { repeated IDRange ranges = 2; } -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -message ThirdPartyResource { - // Standard object metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Description is the description of this object. 
- // +optional - optional string description = 2; - - // Versions are versions for this third party object - // +optional - repeated APIVersion versions = 3; -} - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -message ThirdPartyResourceData { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the raw JSON data for this data. - // +optional - optional bytes data = 2; -} - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -message ThirdPartyResourceDataList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdpartyResourceData. - repeated ThirdPartyResourceData items = 2; -} - -// ThirdPartyResourceList is a list of ThirdPartyResources. -message ThirdPartyResourceList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ThirdPartyResources. - repeated ThirdPartyResource items = 2; -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index 626701cf..7625f678 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Deployment{}, @@ -49,12 +49,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentRollback{}, &ReplicationControllerDummy{}, &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index d3cff2ab..c3d9f72d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -100,63 +100,6 @@ type CustomMetricCurrentStatusList struct { Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` } -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. -type ThirdPartyResource struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Description is the description of this object. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,2,opt,name=description"` - - // Versions are versions for this third party object - // +optional - Versions []APIVersion `json:"versions,omitempty" protobuf:"bytes,3,rep,name=versions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ThirdPartyResourceList is a list of ThirdPartyResources. -type ThirdPartyResourceList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. 
- // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdPartyResources. - Items []ThirdPartyResource `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// An APIVersion represents a single concrete version of an object model. -type APIVersion struct { - // Name of this version (e.g. 'v1'). - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the raw JSON data for this data. - // +optional - Data []byte `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` -} - // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -532,6 +475,33 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` } // +genclient @@ -588,20 +558,6 @@ type DaemonSetList struct { Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ThirdPartyResrouceDataList is a list of ThirdPartyResourceData. -type ThirdPartyResourceDataList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ThirdpartyResourceData. 
- Items []ThirdPartyResourceData `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -926,8 +882,9 @@ type PodSecurityPolicySpec struct { // +optional Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` // DefaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capabiility in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // unless the pod spec specifically drops the capability. You may not list a capability in both + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` // RequiredDropCapabilities are the capabilities that will be dropped from the container. These @@ -981,6 +938,11 @@ type PodSecurityPolicySpec struct { // is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` } // defines the host volume conditions that will be enabled by a policy @@ -1024,6 +986,12 @@ var ( All FSType = "*" ) +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + // Host Port Range defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { @@ -1144,6 +1112,7 @@ type PodSecurityPolicyList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. // NetworkPolicy describes what network traffic is allowed for a set of Pods type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` @@ -1157,6 +1126,7 @@ type NetworkPolicy struct { Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } +// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. // Policy Type string describes the NetworkPolicy type // This type is beta-level in 1.8 type PolicyType string @@ -1168,6 +1138,7 @@ const ( PolicyTypeEgress PolicyType = "Egress" ) +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. type NetworkPolicySpec struct { // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules // is applied to any pods selected by this field. 
Multiple network policies can select the @@ -1210,6 +1181,7 @@ type NetworkPolicySpec struct { PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. // This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. type NetworkPolicyIngressRule struct { // List of ports which should be made accessible on the pods selected for this rule. @@ -1229,6 +1201,7 @@ type NetworkPolicyIngressRule struct { From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. // NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods // matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. // This type is beta-level in 1.8 @@ -1250,6 +1223,7 @@ type NetworkPolicyEgressRule struct { To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. type NetworkPolicyPort struct { // Optional. The protocol (TCP or UDP) which traffic must match. // If not specified, this field defaults to TCP. @@ -1265,6 +1239,7 @@ type NetworkPolicyPort struct { Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. // IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods // matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should // not be included within this rule. @@ -1279,6 +1254,7 @@ type IPBlock struct { Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` } +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. type NetworkPolicyPeer struct { // Exactly one of the following must be specified. @@ -1302,6 +1278,7 @@ type NetworkPolicyPeer struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. // Network Policy List is a list of NetworkPolicy objects. type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index f65f895e..236d934f 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,13 +27,13 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE -var map_APIVersion = map[string]string{ - "": "An APIVersion represents a single concrete version of an object model.", - "name": "Name of this version (e.g. 
'v1').", +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "Driver is the name of the Flexvolume driver.", } -func (APIVersion) SwaggerDoc() map[string]string { - return map_APIVersion +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume } var map_AllowedHostPath = map[string]string{ @@ -75,6 +75,19 @@ func (DaemonSet) SwaggerDoc() map[string]string { return map_DaemonSet } +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", @@ -110,6 +123,7 @@ var map_DaemonSetStatus = map[string]string{ "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", } func (DaemonSetStatus) SwaggerDoc() map[string]string { @@ -264,7 +278,7 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } @@ -352,7 +366,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { } var map_NetworkPolicy = map[string]string{ - "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -362,7 +376,7 @@ func (NetworkPolicy) SwaggerDoc() map[string]string { } var map_NetworkPolicyEgressRule = map[string]string{ - "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", } @@ -372,7 +386,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyIngressRule = map[string]string{ - "": "This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", } @@ -382,7 +396,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { } var map_NetworkPolicyList = map[string]string{ - "": "Network Policy List is a list of NetworkPolicy objects.", + "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -392,6 +406,7 @@ func (NetworkPolicyList) SwaggerDoc() map[string]string { } var map_NetworkPolicyPeer = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", "podSelector": "This is a label selector which selects Pods in this namespace. This field follows standard label selector semantics. If present but empty, this selector selects all pods in this namespace.", "namespaceSelector": "Selects Namespaces using cluster scoped-labels. This matches all pods in all namespaces selected by this label selector. This field follows standard label selector semantics. If present but empty, this selector selects all namespaces.", "ipBlock": "IPBlock defines policy on a particular IPBlock", @@ -402,6 +417,7 @@ func (NetworkPolicyPeer) SwaggerDoc() map[string]string { } var map_NetworkPolicyPort = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", "protocol": "Optional. The protocol (TCP or UDP) which traffic must match. If not specified, this field defaults to TCP.", "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", } @@ -411,6 +427,7 @@ func (NetworkPolicyPort) SwaggerDoc() map[string]string { } var map_NetworkPolicySpec = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). 
This field is beta-level in 1.8", @@ -444,7 +461,7 @@ func (PodSecurityPolicyList) SwaggerDoc() map[string]string { var map_PodSecurityPolicySpec = map[string]string{ "": "Pod Security Policy Spec defines the policy enforced.", "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capabiility in both DefaultAddCapabilities and RequiredDropCapabilities.", + "defaultAddCapabilities": "DefaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the AllowedCapabilities list.", "requiredDropCapabilities": "RequiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", "allowedCapabilities": "AllowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both AllowedCapabilities and RequiredDropCapabilities.", "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that all plugins may be used.", @@ -460,6 +477,7 @@ var map_PodSecurityPolicySpec = map[string]string{ "defaultAllowPrivilegeEscalation": "DefaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", "allowPrivilegeEscalation": "AllowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"Volumes\" field.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -623,45 +641,4 @@ func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { return map_SupplementalGroupsStrategyOptions } -var map_ThirdPartyResource = map[string]string{ - "": "A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource types to the API. It consists of one or more Versions of the api.", - "metadata": "Standard object metadata", - "description": "Description is the description of this object.", - "versions": "Versions are versions for this third party object", -} - -func (ThirdPartyResource) SwaggerDoc() map[string]string { - return map_ThirdPartyResource -} - -var map_ThirdPartyResourceData = map[string]string{ - "": "An internal object, used for versioned storage in etcd. 
Not exposed to the end user.", - "metadata": "Standard object metadata.", - "data": "Data is the raw JSON data for this data.", -} - -func (ThirdPartyResourceData) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceData -} - -var map_ThirdPartyResourceDataList = map[string]string{ - "": "ThirdPartyResrouceDataList is a list of ThirdPartyResourceData.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ThirdpartyResourceData.", -} - -func (ThirdPartyResourceDataList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceDataList -} - -var map_ThirdPartyResourceList = map[string]string{ - "": "ThirdPartyResourceList is a list of ThirdPartyResources.", - "metadata": "Standard list metadata.", - "items": "Items is the list of ThirdPartyResources.", -} - -func (ThirdPartyResourceList) SwaggerDoc() map[string]string { - return map_ThirdPartyResourceList -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index c173b3f4..02a84ccc 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -23,281 +23,22 @@ package v1beta1 import ( core_v1 "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersion).DeepCopyInto(out.(*APIVersion)) - return nil - }, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath)) - return nil - }, InType: reflect.TypeOf(&AllowedHostPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange)) - return nil - }, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IDRange).DeepCopyInto(out.(*IDRange)) - return nil - }, InType: reflect.TypeOf(&IDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Ingress).DeepCopyInto(out.(*Ingress)) - return nil - }, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend)) - return nil - }, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressList).DeepCopyInto(out.(*IngressList)) - return nil - }, InType: reflect.TypeOf(&IngressList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRule).DeepCopyInto(out.(*IngressRule)) - return nil - }, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec)) - return nil - }, InType: reflect.TypeOf(&IngressSpec{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus)) - return nil - }, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS)) - return nil - }, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIVersion) DeepCopyInto(out *APIVersion) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion. -func (in *APIVersion) DeepCopy() *APIVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(APIVersion) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } @@ -427,6 +168,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -509,6 +267,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1445,6 +1210,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedHostPath, len(*in)) copy(*out, *in) } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } return } @@ -1831,135 +1601,3 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource. -func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource { - if in == nil { - return nil - } - out := new(ThirdPartyResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
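// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the vendored patch above: how the
// generated DaemonSetCondition/DaemonSetStatus helpers added in this hunk are
// typically used. The package alias and the function name are assumptions
// made only for this example.
package example

import extv1beta1 "k8s.io/api/extensions/v1beta1"

// copyDaemonSetStatus returns an independent copy. With the Conditions
// handling added above, DeepCopyInto duplicates the Conditions slice element
// by element, so mutating the copy no longer aliases the original's slice.
func copyDaemonSetStatus(in *extv1beta1.DaemonSetStatus) *extv1beta1.DaemonSetStatus {
	out := new(extv1beta1.DaemonSetStatus)
	in.DeepCopyInto(out)
	return out
}
// ---------------------------------------------------------------------------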
-func (in *ThirdPartyResource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData. -func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData { - if in == nil { - return nil - } - out := new(ThirdPartyResourceData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList. -func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList. -func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index 8bcc30b0..ef9ae2ae 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index 62402b87..ae28d2f2 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -187,3 +187,4 @@ message NetworkPolicySpec { // +optional repeated string policyTypes = 4; } + diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go index 2ba9951d..f47f22e9 100644 --- a/vendor/k8s.io/api/networking/v1/register.go +++ b/vendor/k8s.io/api/networking/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &NetworkPolicy{}, diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 0e670966..955e7434 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -23,57 +23,10 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
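// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the vendored patch above: what the
// "static deepcopy" mentioned in the deprecated comment looks like from the
// caller's side. Instead of a scheme-registered Cloner, objects are copied
// through their own generated methods. Names below are assumptions for the
// example only.
package example

import (
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// cloneNetworkPolicy copies a NetworkPolicy without consulting any Scheme:
// DeepCopyObject is generated directly on the type and returns a fresh
// runtime.Object, which is why the reflection-based registration can go away.
func cloneNetworkPolicy(in *networkingv1.NetworkPolicy) runtime.Object {
	return in.DeepCopyObject()
}
// ---------------------------------------------------------------------------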
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 1e9f4974..9c456f92 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index af74c52c..a276be1c 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -111,3 +111,4 @@ message PodDisruptionBudgetStatus { // total number of pods counted by this disruption budget optional int32 expectedPods = 6; } + diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go index e0c6247a..d77f1304 100644 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. 
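// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the vendored patch above: "the
// given scheme" in the reworded comment is whatever runtime.Scheme the caller
// assembles. A minimal wiring example; the function name is an assumption.
package example

import (
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func newScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes below, registering PodDisruptionBudget
	// and the rest of the policy/v1beta1 types against this scheme.
	if err := policyv1beta1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
// ---------------------------------------------------------------------------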
func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodDisruptionBudget{}, diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 93c201e2..70872f09 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -22,45 +22,10 @@ package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Eviction).DeepCopyInto(out.(*Eviction)) - return nil - }, InType: reflect.TypeOf(&Eviction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudget).DeepCopyInto(out.(*PodDisruptionBudget)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetList).DeepCopyInto(out.(*PodDisruptionBudgetList)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetSpec).DeepCopyInto(out.(*PodDisruptionBudgetSpec)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodDisruptionBudgetStatus).DeepCopyInto(out.(*PodDisruptionBudgetStatus)) - return nil - }, InType: reflect.TypeOf(&PodDisruptionBudgetStatus{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Eviction) DeepCopyInto(out *Eviction) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 737261a0..28ceb269 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index 1285ac19..1530d379 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. 
k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return 
fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,52 +2693,57 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 743 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x94, 0x4f, 0x6b, 0x13, 0x4f, - 0x18, 0xc7, 0x33, 0xf9, 0x43, 0xb3, 0x93, 0x5f, 0xc8, 0xaf, 0x2b, 0xc8, 0x52, 0x61, 0x13, 0x72, - 0x90, 0x80, 0xba, 0x6b, 0xaa, 0xa8, 0x20, 0x3d, 0xb8, 0x15, 0xa5, 0xb4, 0xd6, 0x32, 0xa2, 0x07, - 0xf1, 0xe0, 0x66, 0x33, 0x4d, 0xc7, 0x64, 0xff, 0x30, 0x33, 0x1b, 0x28, 0x5e, 0xc4, 0x9b, 0x37, - 0xdf, 0x85, 0x17, 0xbd, 0xe9, 0x2b, 0xf0, 0xd2, 0x63, 0x8f, 0x3d, 0x05, 0xbb, 0xbe, 0x10, 0x65, - 0x66, 0x77, 0xb3, 0x49, 0x93, 0xd8, 0x9e, 0x02, 0xe2, 0x29, 0x99, 0xe7, 0xf9, 0x7c, 0x9f, 0xf9, - 0xce, 0xb3, 0x33, 0x0f, 0xbc, 0xdf, 
0xbf, 0xc7, 0x0c, 0xe2, 0x9b, 0xfd, 0xb0, 0x83, 0xa9, 0x87, - 0x39, 0x66, 0xe6, 0x10, 0x7b, 0x5d, 0x9f, 0x9a, 0x49, 0xc2, 0x0e, 0x88, 0x49, 0x3b, 0xb6, 0x63, - 0x0e, 0xdb, 0x66, 0x0f, 0x7b, 0x98, 0xda, 0x1c, 0x77, 0x8d, 0x80, 0xfa, 0xdc, 0x57, 0xd5, 0x98, - 0x31, 0xec, 0x80, 0x18, 0x82, 0x31, 0x86, 0xed, 0xb5, 0x1b, 0x3d, 0xc2, 0x0f, 0xc2, 0x8e, 0xe1, - 0xf8, 0xae, 0xd9, 0xf3, 0x7b, 0xbe, 0x29, 0xd1, 0x4e, 0xb8, 0x2f, 0x57, 0x72, 0x21, 0xff, 0xc5, - 0x25, 0xd6, 0x6e, 0x67, 0xdb, 0xb8, 0xb6, 0x73, 0x40, 0x3c, 0x4c, 0x0f, 0xcd, 0xa0, 0xdf, 0x13, - 0x01, 0x66, 0xba, 0x98, 0xdb, 0x73, 0x36, 0x5e, 0x33, 0x17, 0xa9, 0x68, 0xe8, 0x71, 0xe2, 0xe2, - 0x19, 0xc1, 0x9d, 0xf3, 0x04, 0xcc, 0x39, 0xc0, 0xae, 0x3d, 0xa3, 0xbb, 0xb5, 0x48, 0x17, 0x72, - 0x32, 0x30, 0x89, 0xc7, 0x19, 0xa7, 0x67, 0x45, 0xcd, 0xaf, 0x00, 0x56, 0x36, 0x07, 0x21, 0xe3, - 0x98, 0x22, 0x7f, 0x80, 0xd5, 0xd7, 0xb0, 0x2c, 0x0e, 0xd2, 0xb5, 0xb9, 0xad, 0x81, 0x06, 0x68, - 0x55, 0xd6, 0x6f, 0x1a, 0x59, 0xe7, 0xc6, 0x75, 0x8d, 0xa0, 0xdf, 0x13, 0x01, 0x66, 0x08, 0xda, - 0x18, 0xb6, 0x8d, 0xa7, 0x9d, 0x37, 0xd8, 0xe1, 0x4f, 0x30, 0xb7, 0x2d, 0xf5, 0x68, 0x54, 0xcf, - 0x45, 0xa3, 0x3a, 0xcc, 0x62, 0x68, 0x5c, 0x55, 0xdd, 0x84, 0x25, 0x1a, 0x0e, 0x30, 0xd3, 0xf2, - 0x8d, 0x42, 0xab, 0xb2, 0xae, 0x1b, 0xb3, 0x1f, 0xc6, 0xd8, 0xf3, 0x07, 0xc4, 0x39, 0x44, 0xe1, - 0x00, 0x5b, 0xd5, 0xa4, 0x58, 0x49, 0xac, 0x18, 0x8a, 0xb5, 0xcd, 0x0f, 0x79, 0xa8, 0x4e, 0xd8, - 0xb6, 0x88, 0xd7, 0x25, 0x5e, 0x6f, 0x09, 0xee, 0xb7, 0x60, 0x99, 0x85, 0x32, 0x91, 0x1e, 0xe0, - 0xca, 0xbc, 0x03, 0x3c, 0x8b, 0x19, 0xeb, 0xff, 0xa4, 0x58, 0x39, 0x09, 0x30, 0x34, 0x96, 0xab, - 0x8f, 0xe0, 0x0a, 0xf5, 0x07, 0x18, 0xe1, 0x7d, 0xad, 0x20, 0xbd, 0xce, 0xad, 0x84, 0x62, 0xc4, - 0xaa, 0x25, 0x95, 0x56, 0x92, 0x00, 0x4a, 0xc5, 0xcd, 0xef, 0x00, 0x5e, 0x9e, 0xed, 0xc5, 0x0e, - 0x61, 0x5c, 0x7d, 0x35, 0xd3, 0x0f, 0xe3, 0x62, 0xfd, 0x10, 0x6a, 0xd9, 0x8d, 0xf1, 0x01, 0xd2, - 0xc8, 0x44, 0x2f, 0xb6, 0x61, 0x89, 0x70, 0xec, 0xa6, 0x8d, 0xb8, 0x3a, 0xcf, 0xfe, 0xac, 0xb1, - 0xec, 0x8b, 0x6e, 0x09, 0x31, 0x8a, 0x6b, 0x34, 0xbf, 0x01, 0x58, 0x9b, 0x80, 0x97, 0x60, 0xff, - 0xe1, 0xb4, 0xfd, 0xfa, 0x79, 0xf6, 0xe7, 0xfb, 0xfe, 0x05, 0x20, 0xcc, 0xae, 0xab, 0x5a, 0x87, - 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x81, 0x46, 0xa1, 0xa5, 0x58, 0x8a, 0xe0, 0x5f, 0x88, 0x00, 0x8a, - 0xe3, 0xea, 0x35, 0xa8, 0xd8, 0x01, 0x79, 0x4c, 0xfd, 0x30, 0x88, 0x77, 0x56, 0xac, 0x6a, 0x34, - 0xaa, 0x2b, 0x0f, 0xf6, 0xb6, 0xe2, 0x20, 0xca, 0xf2, 0x02, 0xa6, 0x98, 0xf9, 0x21, 0x75, 0x30, - 0xd3, 0x0a, 0x19, 0x8c, 0xd2, 0x20, 0xca, 0xf2, 0xea, 0x5d, 0x58, 0x4d, 0x17, 0xbb, 0xb6, 0x8b, - 0x99, 0x56, 0x94, 0x82, 0xd5, 0x68, 0x54, 0xaf, 0xa2, 0xc9, 0x04, 0x9a, 0xe6, 0xd4, 0x0d, 0x58, - 0xf3, 0x7c, 0x2f, 0x45, 0x9e, 0xa3, 0x1d, 0xa6, 0x95, 0xa4, 0xf4, 0x52, 0x34, 0xaa, 0xd7, 0x76, - 0xa7, 0x53, 0xe8, 0x2c, 0xdb, 0xfc, 0x02, 0x60, 0xf1, 0x6f, 0x9a, 0x1d, 0xef, 0xf3, 0xb0, 0xf2, - 0xcf, 0x0f, 0x0d, 0xf1, 0xdc, 0x96, 0x3b, 0x2d, 0x2e, 0xf2, 0xdc, 0xce, 0x1f, 0x13, 0x9f, 0x00, - 0x2c, 0x2f, 0x69, 0x3e, 0x6c, 0x4c, 0x1b, 0xd6, 0x16, 0x1a, 0x9e, 0xef, 0xf4, 0x2d, 0x4c, 0xbb, - 0xae, 0x5e, 0x87, 0xe5, 0xf4, 0x4d, 0x4b, 0x9f, 0x4a, 0xb6, 0x6f, 0xfa, 0xec, 0xd1, 0x98, 0x50, - 0x1b, 0xb0, 0xd8, 0x27, 0x5e, 0x57, 0xcb, 0x4b, 0xf2, 0xbf, 0x84, 0x2c, 0x6e, 0x13, 0xaf, 0x8b, - 0x64, 0x46, 0x10, 0x9e, 0xed, 0x62, 0x79, 0x03, 0x26, 0x08, 0xf1, 0x9a, 0x91, 0xcc, 0x34, 0x3f, - 0x03, 0xb8, 0x92, 0xdc, 0x9e, 0x71, 0x3d, 0xb0, 0xb0, 0xde, 0xa4, 0xbf, 0xfc, 0x45, 0xfc, 0xfd, - 0x79, 0x77, 0xd5, 0x84, 0x8a, 0xf8, 0x65, 0x81, 0xed, 0x60, 
0xad, 0x28, 0xb1, 0xd5, 0x04, 0x53, - 0x76, 0xd3, 0x04, 0xca, 0x18, 0xab, 0x75, 0x74, 0xaa, 0xe7, 0x8e, 0x4f, 0xf5, 0xdc, 0xc9, 0xa9, - 0x9e, 0x7b, 0x17, 0xe9, 0xe0, 0x28, 0xd2, 0xc1, 0x71, 0xa4, 0x83, 0x93, 0x48, 0x07, 0x3f, 0x22, - 0x1d, 0x7c, 0xfc, 0xa9, 0xe7, 0x5e, 0xe6, 0x87, 0xed, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x66, - 0x92, 0x08, 0x1d, 0x04, 0x0a, 0x00, 0x00, + // 827 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x8b, 0x23, 0x45, + 0x18, 0x4d, 0x65, 0x12, 0x26, 0x5d, 0x31, 0xc4, 0x2d, 0x17, 0x69, 0xa2, 0x74, 0x86, 0x16, 0x24, + 0xa0, 0x76, 0x9b, 0x5d, 0x51, 0x41, 0xf6, 0xb0, 0xbd, 0xa2, 0x0c, 0x3b, 0x8e, 0x4b, 0x2d, 0x7a, + 0x10, 0x0f, 0x56, 0x77, 0x6a, 0x3b, 0x65, 0xfa, 0x17, 0x55, 0xd5, 0x81, 0xc5, 0x8b, 0x08, 0x1e, + 0xbc, 0x79, 0xd4, 0xbf, 0xc0, 0x8b, 0x1e, 0xfd, 0x0b, 0xbc, 0xcc, 0x71, 0x8f, 0x7b, 0x0a, 0x4e, + 0xfb, 0x87, 0x28, 0xfd, 0x2b, 0x9d, 0xa4, 0x3b, 0x4e, 0x4e, 0x01, 0xf1, 0x34, 0x53, 0xdf, 0xf7, + 0xde, 0xfb, 0x5e, 0xbf, 0xa9, 0xaf, 0x06, 0x7e, 0xb0, 0x78, 0x5f, 0x18, 0x2c, 0x34, 0x17, 0xb1, + 0x4d, 0x79, 0x40, 0x25, 0x15, 0xe6, 0x92, 0x06, 0xb3, 0x90, 0x9b, 0x45, 0x83, 0x44, 0xcc, 0xe4, + 0x36, 0x71, 0xcc, 0xe5, 0xd4, 0x74, 0x69, 0x40, 0x39, 0x91, 0x74, 0x66, 0x44, 0x3c, 0x94, 0x21, + 0x42, 0x39, 0xc6, 0x20, 0x11, 0x33, 0x52, 0x8c, 0xb1, 0x9c, 0x8e, 0xde, 0x72, 0x99, 0x9c, 0xc7, + 0xb6, 0xe1, 0x84, 0xbe, 0xe9, 0x86, 0x6e, 0x68, 0x66, 0x50, 0x3b, 0x7e, 0x92, 0x9d, 0xb2, 0x43, + 0xf6, 0x5b, 0x2e, 0x31, 0x9a, 0xd4, 0xc7, 0x10, 0x2f, 0x9a, 0x93, 0xda, 0xb0, 0xd1, 0x3b, 0x15, + 0xd2, 0x27, 0xce, 0x9c, 0x05, 0x94, 0x3f, 0x35, 0xa3, 0x85, 0x9b, 0x16, 0x84, 0xe9, 0x53, 0x49, + 0x1a, 0x2c, 0x8e, 0xcc, 0x7d, 0x2c, 0x1e, 0x07, 0x92, 0xf9, 0xb4, 0x46, 0x78, 0xf7, 0x26, 0x82, + 0x70, 0xe6, 0xd4, 0x27, 0x35, 0xde, 0xdd, 0x7d, 0xbc, 0x58, 0x32, 0xcf, 0x64, 0x81, 0x14, 0x92, + 0xef, 0x92, 0xf4, 0x9f, 0x01, 0x1c, 0xde, 0x77, 0x5d, 0x4e, 0x5d, 0x22, 0x59, 0x18, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0x6f, 0x3b, 0x5e, 0x2c, 0x24, 0xe5, 0x38, 0xf4, 0xe8, 0x63, 0xea, 0x51, + 0x47, 0x86, 0x5c, 0xa8, 0xe0, 0xec, 0x64, 0xd2, 0xbf, 0x73, 0xd7, 0xa8, 0x42, 0x5f, 0x0f, 0x32, + 0xa2, 0x85, 0x9b, 0x16, 0x84, 0x91, 0xe6, 0x60, 0x2c, 0xa7, 0xc6, 0x05, 0xb1, 0xa9, 0x57, 0x72, + 0xad, 0x57, 0xaf, 0x56, 0xe3, 0x56, 0xb2, 0x1a, 0xdf, 0x7e, 0xd0, 0x20, 0x8c, 0x1b, 0xc7, 0xe9, + 0x3f, 0xb5, 0x61, 0x7f, 0x03, 0x8e, 0xbe, 0x82, 0xbd, 0x54, 0x7c, 0x46, 0x24, 0x51, 0xc1, 0x19, + 0x98, 0xf4, 0xef, 0xbc, 0x7d, 0x98, 0x95, 0x4f, 0xed, 0xaf, 0xa9, 0x23, 0x3f, 0xa1, 0x92, 0x58, + 0xa8, 0xf0, 0x01, 0xab, 0x1a, 0x5e, 0xab, 0xa2, 0x07, 0xb0, 0xcb, 0x63, 0x8f, 0x0a, 0xb5, 0x9d, + 0x7d, 0xa9, 0x66, 0xd4, 0xaf, 0x97, 0xf1, 0x28, 0xf4, 0x98, 0xf3, 0x34, 0x0d, 0xca, 0x1a, 0x14, + 0x62, 0xdd, 0xf4, 0x24, 0x70, 0xce, 0x45, 0x36, 0x1c, 0x92, 0xed, 0x44, 0xd5, 0x93, 0xcc, 0xed, + 0x6b, 0x4d, 0x72, 0x3b, 0xe1, 0x5b, 0x2f, 0x25, 0xab, 0xf1, 0xee, 0x5f, 0x04, 0xef, 0x0a, 0xea, + 0x3f, 0xb4, 0x21, 0xda, 0x88, 0xc6, 0x62, 0xc1, 0x8c, 0x05, 0xee, 0x11, 0x12, 0x3a, 0x87, 0x3d, + 0x11, 0x67, 0x8d, 0x32, 0xa4, 0x57, 0x9a, 0xbe, 0xea, 0x71, 0x8e, 0xb1, 0x5e, 0x2c, 0xc4, 0x7a, + 0x45, 0x41, 0xe0, 0x35, 0x1d, 0x7d, 0x04, 0x4f, 0x79, 0xe8, 0x51, 0x4c, 0x9f, 0x14, 0xf9, 0x34, + 0x2a, 0xe1, 0x1c, 0x62, 0x0d, 0x0b, 0xa5, 0xd3, 0xa2, 0x80, 0x4b, 0xb2, 0xfe, 0x07, 0x80, 0x2f, + 0xd7, 0xb3, 0xb8, 0x60, 0x42, 0xa2, 0x2f, 0x6b, 0x79, 0x18, 0x07, 0x5e, 0x5e, 0x26, 0xf2, 0x34, + 0xd6, 0x1f, 0x50, 0x56, 0x36, 0xb2, 0x78, 0x08, 0xbb, 0x4c, 0x52, 0xbf, 0x0c, 0xe2, 0xf5, 
0x26, + 0xfb, 0x75, 0x63, 0xd5, 0xad, 0x39, 0x4f, 0xc9, 0x38, 0xd7, 0xd0, 0x7f, 0x07, 0x70, 0xb8, 0x01, + 0x3e, 0x82, 0xfd, 0x0f, 0xb7, 0xed, 0x8f, 0x6f, 0xb2, 0xdf, 0xec, 0xfb, 0x6f, 0x00, 0x61, 0xb5, + 0x12, 0x68, 0x0c, 0xbb, 0x4b, 0xca, 0xed, 0xfc, 0xad, 0x50, 0x2c, 0x25, 0xc5, 0x7f, 0x9e, 0x16, + 0x70, 0x5e, 0x47, 0x6f, 0x40, 0x85, 0x44, 0xec, 0x63, 0x1e, 0xc6, 0x51, 0x3e, 0x59, 0xb1, 0x06, + 0xc9, 0x6a, 0xac, 0xdc, 0x7f, 0x74, 0x9e, 0x17, 0x71, 0xd5, 0x4f, 0xc1, 0x9c, 0x8a, 0x30, 0xe6, + 0x0e, 0x15, 0xea, 0x49, 0x05, 0xc6, 0x65, 0x11, 0x57, 0x7d, 0xf4, 0x1e, 0x1c, 0x94, 0x87, 0x4b, + 0xe2, 0x53, 0xa1, 0x76, 0x32, 0xc2, 0xad, 0x64, 0x35, 0x1e, 0xe0, 0xcd, 0x06, 0xde, 0xc6, 0xa1, + 0x7b, 0x70, 0x18, 0x84, 0x41, 0x09, 0xf9, 0x0c, 0x5f, 0x08, 0xb5, 0x9b, 0x51, 0xb3, 0x5d, 0xbc, + 0xdc, 0x6e, 0xe1, 0x5d, 0xac, 0xfe, 0x1b, 0x80, 0x9d, 0xff, 0xd0, 0xfb, 0xa4, 0x7f, 0xd7, 0x86, + 0xfd, 0xff, 0xfd, 0xa3, 0x91, 0xae, 0xdb, 0x71, 0x5f, 0x8b, 0x43, 0xd6, 0xed, 0xe6, 0x67, 0xe2, + 0x17, 0x00, 0x7b, 0x47, 0x7a, 0x1f, 0xee, 0x6d, 0x1b, 0x56, 0xf7, 0x1a, 0x6e, 0x76, 0xfa, 0x0d, + 0x2c, 0x53, 0x47, 0x6f, 0xc2, 0x5e, 0xb9, 0xd3, 0x99, 0x4f, 0xa5, 0x9a, 0x5b, 0xae, 0x3d, 0x5e, + 0x23, 0xd0, 0x19, 0xec, 0x2c, 0x58, 0x30, 0x53, 0xdb, 0x19, 0xf2, 0x85, 0x02, 0xd9, 0x79, 0xc8, + 0x82, 0x19, 0xce, 0x3a, 0x29, 0x22, 0x20, 0x7e, 0xfe, 0x6f, 0x75, 0x03, 0x91, 0x6e, 0x33, 0xce, + 0x3a, 0xfa, 0xaf, 0x00, 0x9e, 0x16, 0xb7, 0x67, 0xad, 0x07, 0xf6, 0xea, 0x6d, 0xfa, 0x6b, 0x1f, + 0xe2, 0xef, 0xdf, 0xa7, 0x23, 0x13, 0x2a, 0xe9, 0x4f, 0x11, 0x11, 0x87, 0xaa, 0x9d, 0x0c, 0x76, + 0xab, 0x80, 0x29, 0x97, 0x65, 0x03, 0x57, 0x18, 0x6b, 0x72, 0x75, 0xad, 0xb5, 0x9e, 0x5d, 0x6b, + 0xad, 0xe7, 0xd7, 0x5a, 0xeb, 0xdb, 0x44, 0x03, 0x57, 0x89, 0x06, 0x9e, 0x25, 0x1a, 0x78, 0x9e, + 0x68, 0xe0, 0xcf, 0x44, 0x03, 0x3f, 0xfe, 0xa5, 0xb5, 0xbe, 0x68, 0x2f, 0xa7, 0xff, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x32, 0xe3, 0x23, 0xf8, 0x2e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 796a28f4..6edb2779 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.rbac.v1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
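// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the vendored patch above: the
// hard-coded key bytes written by the hand-rolled MarshalTo functions earlier
// in this patch (0xa, 0x12, 0x1a, ... in generated.pb.go) follow directly
// from the field numbers declared in this .proto file, using the standard
// protobuf key encoding key = (field_number << 3) | wire_type, with wire
// type 2 for length-delimited fields. A quick check:
package main

import "fmt"

func protoKey(fieldNumber, wireType uint64) uint64 { return fieldNumber<<3 | wireType }

func main() {
	fmt.Printf("%#x\n", protoKey(1, 2)) // 0xa  -> field 1, e.g. metadata / clusterRoleSelectors
	fmt.Printf("%#x\n", protoKey(2, 2)) // 0x12 -> field 2, e.g. rules
	fmt.Printf("%#x\n", protoKey(3, 2)) // 0x1a -> field 3, e.g. aggregationRule
}
// ---------------------------------------------------------------------------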
+ // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -179,3 +194,4 @@ message Subject { // +optional optional string namespace = 4; } + diff --git a/vendor/k8s.io/api/rbac/v1/register.go b/vendor/k8s.io/api/rbac/v1/register.go index 7336b545..8f1fd460 100644 --- a/vendor/k8s.io/api/rbac/v1/register.go +++ b/vendor/k8s.io/api/rbac/v1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 8dbd1a8b..91990548 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -170,6 +170,20 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 7770d408..280ae5a8 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 7ffc8186..e1aab581 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]meta_v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
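// ---------------------------------------------------------------------------
// Editor's illustrative sketch, not part of the vendored patch above: how the
// new AggregationRule field introduced in this rbac/v1 change is populated.
// The role name and the label key in the selector are made-up placeholders.
package example

import (
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func monitoringClusterRole() *rbacv1.ClusterRole {
	cr := &rbacv1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"},
		// Rules is deliberately left empty: per the field's doc comment, the
		// controller manages it and overwrites direct edits once aggregation runs.
		AggregationRule: &rbacv1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{"rbac.example.com/aggregate-to-monitoring": "true"}},
			},
		},
	}
	// The generated deepcopy helpers in this file (AggregationRule.DeepCopy
	// above and the ClusterRole.DeepCopyInto change just below) also clone the
	// AggregationRule pointer and its selectors, so the copy is independent.
	return cr.DeepCopy()
}
// ---------------------------------------------------------------------------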
@@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 619b47bf..5236a477 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 31e68aee..c66cadd9 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, 
i, uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this 
*AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,53 +2693,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 766 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0xf9, 0xa0, 0xc9, 0x86, 0x28, 0xd4, 0x48, 0xc8, 0xea, 0xc1, 0x09, 0x11, 0x48, - 0x95, 0x28, 0x36, 0x2d, 0x08, 0xb8, 0x70, 0x68, 0x7a, 0x40, 0x81, 0xd2, 0x96, 0x45, 0xf4, 0x80, - 0x38, 0xb0, 0x71, 0xb6, 0xc9, 0x12, 0x7f, 0x69, 0xd7, 0x8e, 0x54, 0x71, 0xe1, 0x09, 0x10, 0x17, - 0x1e, 0x83, 0x0b, 0xdc, 0xe0, 0x05, 0xca, 0xad, 0xc7, 0x9e, 0x22, 0x6a, 0x1e, 0x04, 0xb4, 0x6b, - 0x3b, 0x4e, 0x9a, 0x86, 0xf4, 0x14, 0x09, 0x89, 0x93, 0xbd, 0x33, 0xbf, 0xf9, 0xef, 0xcc, 0xec, - 0xee, 0xc0, 0xcd, 0xfe, 0x43, 0xae, 0x53, 0xd7, 0xe8, 0x07, 0x6d, 0xc2, 0x1c, 0xe2, 0x13, 0x6e, - 0x0c, 0x88, 0xd3, 0x71, 0x99, 0x11, 0x3b, 0xb0, 0x47, 0x0d, 0xd6, 0xc6, 0xa6, 0x31, 0x58, 0xc7, - 0x96, 0xd7, 0xc3, 0xeb, 0x46, 0x97, 0x38, 0x84, 0x61, 0x9f, 0x74, 0x74, 0x8f, 0xb9, 0xbe, 0xab, - 0xa8, 0x11, 0xa9, 0x63, 0x8f, 0xea, 0x82, 0xd4, 0x13, 0x72, 0xe5, 0x76, 0x97, 0xfa, 0xbd, 0xa0, - 0xad, 0x9b, 0xae, 0x6d, 0x74, 0xdd, 0xae, 0x6b, 0xc8, 0x80, 0x76, 0x70, 0x20, 0x57, 0x72, 0x21, - 0xff, 0x22, 0xa1, 0x95, 0x7b, 0xe9, 0x96, 0x36, 0x36, 0x7b, 0xd4, 0x21, 0xec, 0xd0, 0xf0, 0xfa, - 0x5d, 0x61, 0xe0, 0x86, 0x4d, 0x7c, 0x6c, 0x0c, 0xa6, 0xb6, 0x5f, 0x31, 0x66, 0x45, 0xb1, 0xc0, - 0xf1, 0xa9, 0x4d, 0xa6, 0x02, 0xee, 0xcf, 0x0b, 0xe0, 0x66, 0x8f, 0xd8, 0x78, 0x2a, 0xee, 0xee, - 0xac, 0xb8, 0xc0, 0xa7, 0x96, 0x41, 0x1d, 0x9f, 0xfb, 0xec, 0x6c, 0x50, 0xe3, 0x1b, 0x80, 0xe5, - 0x2d, 0x2b, 0xe0, 0x3e, 0x61, 0xc8, 0xb5, 0x88, 0xf2, 0x06, 0x16, 0x45, 0x21, 0x1d, 0xec, 0x63, - 0x15, 0xd4, 0xc1, 0x6a, 0x79, 0xe3, 0x8e, 0x9e, 0xf6, 0x6f, 0xa4, 0xab, 0x7b, 0xfd, 0xae, 0x30, - 0x70, 0x5d, 0xd0, 0xfa, 0x60, 0x5d, 0xdf, 0x6d, 0xbf, 0x25, 0xa6, 0xff, 0x8c, 0xf8, 0xb8, 0xa9, - 0x1c, 0x0d, 0x6b, 0x99, 0x70, 0x58, 0x83, 0xa9, 0x0d, 0x8d, 0x54, 0x95, 0x16, 0x2c, 0xb0, 0xc0, - 0x22, 0x5c, 0xcd, 0xd6, 0x73, 0xab, 0xe5, 0x8d, 0x1b, 0xfa, 0xac, 0xe3, 0xd1, 0xf7, 0x5c, 0x8b, - 0x9a, 0x87, 0x28, 0xb0, 0x48, 0xb3, 0x12, 0x4b, 0x16, 0xc4, 0x8a, 0xa3, 0x48, 0xa1, 0xf1, 0x29, - 0x0b, 0x95, 0xb1, 0xe4, 0x9b, 0xd4, 0xe9, 0x50, 0xa7, 0xbb, 0x80, 0x1a, 0x76, 0x61, 0x91, 0x07, - 0xd2, 0x91, 0x94, 0x71, 0x7d, 0x76, 0x19, 0x2f, 0x22, 0xb2, 0x79, 0x25, 0x96, 0x2c, 0xc6, 0x06, - 0x8e, 0x46, 0x22, 0xca, 0x36, 0x5c, 0x62, 0xae, 0x45, 0x10, 0x39, 0x50, 0x73, 0x32, 0xe3, 0xbf, - 0xe8, 0xa1, 0x08, 0x6c, 0x56, 0x63, 0xbd, 0xa5, 0xd8, 0x80, 0x12, 0x89, 0xc6, 0x0f, 0x00, 0xaf, - 0x4d, 0xf7, 0x65, 0x9b, 0x72, 0x5f, 0x79, 0x3d, 0xd5, 0x1b, 0xfd, 0x62, 0xbd, 0x11, 0xd1, 0xb2, - 0x33, 0xa3, 0x32, 0x12, 0xcb, 0x58, 0x5f, 0x9e, 0xc3, 0x02, 0xf5, 0x89, 0x9d, 0x34, 0x65, 0x6d, - 0x76, 0x11, 0xd3, 0xe9, 0xa5, 0x67, 0xdc, 0x12, 0x12, 0x28, 0x52, 0x6a, 0x7c, 0x07, 0xb0, 0x3a, - 0x06, 0x2f, 0xa0, 0x88, 0x27, 0x93, 0x45, 0xdc, 0xbc, 0x58, 0x11, 0xe7, 0x67, 0xff, 0x1b, 0x40, - 0x98, 0x5e, 0x63, 0xa5, 0x06, 0x0b, 0x03, 0xc2, 0xda, 0x5c, 0x05, 0xf5, 0xdc, 0x6a, 0xa9, 0x59, - 0x12, 0xfc, 0xbe, 0x30, 0xa0, 0xc8, 0xae, 0xdc, 0x82, 0x25, 0xec, 0xd1, 0xc7, 0xcc, 0x0d, 0x3c, - 0xae, 0xe6, 0x24, 0x54, 0x09, 0x87, 0xb5, 
0xd2, 0xe6, 0x5e, 0x2b, 0x32, 0xa2, 0xd4, 0x2f, 0x60, - 0x46, 0xb8, 0x1b, 0x30, 0x93, 0x70, 0x35, 0x9f, 0xc2, 0x28, 0x31, 0xa2, 0xd4, 0xaf, 0x3c, 0x80, - 0x95, 0x64, 0xb1, 0x83, 0x6d, 0xc2, 0xd5, 0x82, 0x0c, 0x58, 0x0e, 0x87, 0xb5, 0x0a, 0x1a, 0x77, - 0xa0, 0x49, 0x4e, 0x79, 0x04, 0xab, 0x8e, 0xeb, 0x24, 0xc8, 0x4b, 0xb4, 0xcd, 0xd5, 0x4b, 0x32, - 0xf4, 0x6a, 0x38, 0xac, 0x55, 0x77, 0x26, 0x5d, 0xe8, 0x2c, 0xdb, 0xf8, 0x0a, 0x60, 0xfe, 0xdf, - 0x9b, 0x2c, 0x1f, 0xb2, 0xb0, 0xfc, 0x7f, 0xa4, 0x8c, 0x8d, 0x14, 0xf1, 0x0c, 0x17, 0x3b, 0x4b, - 0x2e, 0xfe, 0x0c, 0xe7, 0x0f, 0x91, 0xcf, 0x00, 0x16, 0x17, 0x34, 0x3d, 0xb6, 0x26, 0xd3, 0xd6, - 0xe6, 0xa4, 0x7d, 0x7e, 0xbe, 0xef, 0x60, 0x72, 0x02, 0xca, 0x1a, 0x2c, 0x26, 0x2f, 0x5e, 0x66, - 0x5b, 0x4a, 0x77, 0x4f, 0x86, 0x02, 0x1a, 0x11, 0x4a, 0x1d, 0xe6, 0xfb, 0xd4, 0xe9, 0xa8, 0x59, - 0x49, 0x5e, 0x8e, 0xc9, 0xfc, 0x53, 0xea, 0x74, 0x90, 0xf4, 0x08, 0xc2, 0xc1, 0x36, 0x91, 0x77, - 0x62, 0x8c, 0x10, 0x6f, 0x1d, 0x49, 0x4f, 0xe3, 0x0b, 0x80, 0x4b, 0xf1, 0x7d, 0x1a, 0xe9, 0x81, - 0x99, 0x7a, 0x1b, 0x10, 0x62, 0x8f, 0xee, 0x13, 0xc6, 0xa9, 0xeb, 0xc4, 0xfb, 0x8e, 0x6e, 0xfa, - 0xe6, 0x5e, 0x2b, 0xf6, 0xa0, 0x31, 0x6a, 0x7e, 0x0e, 0x8a, 0x01, 0x4b, 0xe2, 0xcb, 0x3d, 0x6c, - 0x12, 0x35, 0x2f, 0xb1, 0xe5, 0x18, 0x2b, 0xed, 0x24, 0x0e, 0x94, 0x32, 0x4d, 0xfd, 0xe8, 0x54, - 0xcb, 0x1c, 0x9f, 0x6a, 0x99, 0x93, 0x53, 0x2d, 0xf3, 0x3e, 0xd4, 0xc0, 0x51, 0xa8, 0x81, 0xe3, - 0x50, 0x03, 0x27, 0xa1, 0x06, 0x7e, 0x86, 0x1a, 0xf8, 0xf8, 0x4b, 0xcb, 0xbc, 0x2a, 0x26, 0xcd, - 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x38, 0x05, 0x46, 0x58, 0x0a, 0x00, 0x00, + // 844 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x65, 0x15, 0x6e, 0x38, 0x21, 0x6b, 0x85, 0x9c, 0xc5, 0x02, + 0xe9, 0x10, 0x87, 0xcd, 0xee, 0x21, 0xa0, 0xa1, 0x58, 0x5f, 0x81, 0x16, 0x96, 0xbd, 0x65, 0x4e, + 0x5c, 0x81, 0x28, 0x98, 0x38, 0x73, 0xce, 0x10, 0xdb, 0x63, 0xcd, 0x8c, 0x23, 0x9d, 0x68, 0x68, + 0x68, 0x11, 0x0d, 0x05, 0x3d, 0x2d, 0x0d, 0x94, 0xfc, 0x03, 0x4b, 0x77, 0xe5, 0x56, 0x11, 0x6b, + 0xfe, 0x10, 0x90, 0xc7, 0x76, 0xec, 0xfc, 0x22, 0xa9, 0x22, 0x21, 0x51, 0x25, 0xf3, 0xde, 0xf7, + 0xbe, 0xf7, 0xde, 0x37, 0xf3, 0x9e, 0xe1, 0xd9, 0xf8, 0x03, 0x69, 0x33, 0xee, 0x8c, 0x93, 0x01, + 0x15, 0x11, 0x55, 0x54, 0x3a, 0x13, 0x1a, 0x0d, 0xb9, 0x70, 0x0a, 0x07, 0x89, 0x99, 0x23, 0x06, + 0xc4, 0x73, 0x26, 0x27, 0x24, 0x88, 0x47, 0xe4, 0xc4, 0xf1, 0x69, 0x44, 0x05, 0x51, 0x74, 0x68, + 0xc7, 0x82, 0x2b, 0x8e, 0x8c, 0x1c, 0x69, 0x93, 0x98, 0xd9, 0x19, 0xd2, 0x2e, 0x91, 0x47, 0x6f, + 0xfb, 0x4c, 0x8d, 0x92, 0x81, 0xed, 0xf1, 0xd0, 0xf1, 0xb9, 0xcf, 0x1d, 0x1d, 0x30, 0x48, 0x9e, + 0xea, 0x93, 0x3e, 0xe8, 0x7f, 0x39, 0xd1, 0xd1, 0xbb, 0x55, 0xca, 0x90, 0x78, 0x23, 0x16, 0x51, + 0xf1, 0xcc, 0x89, 0xc7, 0x7e, 0x66, 0x90, 0x4e, 0x48, 0x15, 0x71, 0x26, 0x4b, 0xe9, 0x8f, 0x9c, + 0x75, 0x51, 0x22, 0x89, 0x14, 0x0b, 0xe9, 0x52, 0xc0, 0x7b, 0x9b, 0x02, 0xa4, 0x37, 0xa2, 0x21, + 0x59, 0x8a, 0x7b, 0xb0, 0x2e, 0x2e, 0x51, 0x2c, 0x70, 0x58, 0xa4, 0xa4, 0x12, 0x8b, 0x41, 0xd6, + 0x4f, 0x00, 0xf6, 0xce, 0x7c, 0x5f, 0x50, 0x9f, 0x28, 0xc6, 0x23, 0x9c, 0x04, 0x14, 0x7d, 0x07, + 0xe0, 0x5d, 0x2f, 0x48, 0xa4, 0xa2, 0x02, 0xf3, 0x80, 0x3e, 0xa6, 0x01, 0xf5, 0x14, 0x17, 0xd2, + 0x00, 0xc7, 0x7b, 0xf7, 0x0e, 0x4e, 0x1f, 0xd8, 0x95, 0xa0, 0xb3, 0x44, 0x76, 0x3c, 0xf6, 0x33, + 0x83, 0xb4, 0x33, 0x1d, 0xec, 0xc9, 0x89, 0x7d, 0x41, 0x06, 0x34, 0x28, 0x63, 0xdd, 0x57, 0xaf, + 0xa7, 0xfd, 0x46, 0x3a, 0xed, 
0xdf, 0x7d, 0xb8, 0x82, 0x18, 0xaf, 0x4c, 0x67, 0xfd, 0xdc, 0x84, + 0x07, 0x35, 0x38, 0xfa, 0x0a, 0x76, 0x32, 0xf2, 0x21, 0x51, 0xc4, 0x00, 0xc7, 0xe0, 0xde, 0xc1, + 0xe9, 0x3b, 0xdb, 0x95, 0xf2, 0x68, 0xf0, 0x35, 0xf5, 0xd4, 0xa7, 0x54, 0x11, 0x17, 0x15, 0x75, + 0xc0, 0xca, 0x86, 0x67, 0xac, 0xe8, 0x1c, 0xb6, 0x45, 0x12, 0x50, 0x69, 0x34, 0x75, 0xa7, 0xaf, + 0xdb, 0xeb, 0x9e, 0x8e, 0x7d, 0xc5, 0x03, 0xe6, 0x3d, 0xcb, 0xe4, 0x72, 0x0f, 0x0b, 0xca, 0x76, + 0x76, 0x92, 0x38, 0x67, 0x40, 0x23, 0xd8, 0x23, 0xf3, 0xba, 0x1a, 0x7b, 0xba, 0xe6, 0x37, 0xd7, + 0x93, 0x2e, 0x5c, 0x84, 0xfb, 0x72, 0x3a, 0xed, 0x2f, 0xde, 0x0e, 0x5e, 0xa4, 0xb5, 0x7e, 0x6c, + 0x42, 0x54, 0x93, 0xc9, 0x65, 0xd1, 0x90, 0x45, 0xfe, 0x0e, 0xd4, 0x7a, 0x04, 0x3b, 0x32, 0xd1, + 0x8e, 0x52, 0xb0, 0xd7, 0xd6, 0xf7, 0xf6, 0x38, 0x47, 0xba, 0x2f, 0x15, 0x94, 0x9d, 0xc2, 0x20, + 0xf1, 0x8c, 0x04, 0x5d, 0xc0, 0x7d, 0xc1, 0x03, 0x8a, 0xe9, 0xd3, 0x42, 0xab, 0x7f, 0xe1, 0xc3, + 0x39, 0xd0, 0xed, 0x15, 0x7c, 0xfb, 0x85, 0x01, 0x97, 0x14, 0xd6, 0x1f, 0x00, 0xbe, 0xb2, 0xac, + 0xcb, 0x05, 0x93, 0x0a, 0x7d, 0xb9, 0xa4, 0x8d, 0xbd, 0xe5, 0xa3, 0x66, 0x32, 0x57, 0x66, 0xd6, + 0x46, 0x69, 0xa9, 0xe9, 0xf2, 0x19, 0x6c, 0x33, 0x45, 0xc3, 0x52, 0x94, 0xfb, 0xeb, 0x9b, 0x58, + 0x2e, 0xaf, 0x7a, 0x4d, 0xe7, 0x19, 0x05, 0xce, 0x99, 0xac, 0xdf, 0x01, 0xec, 0xd5, 0xc0, 0x3b, + 0x68, 0xe2, 0xe3, 0xf9, 0x26, 0xde, 0xd8, 0xae, 0x89, 0xd5, 0xd5, 0xff, 0x0d, 0x20, 0xac, 0x06, + 0x06, 0xf5, 0x61, 0x7b, 0x42, 0xc5, 0x20, 0xdf, 0x27, 0x5d, 0xb7, 0x9b, 0xe1, 0x9f, 0x64, 0x06, + 0x9c, 0xdb, 0xd1, 0x5b, 0xb0, 0x4b, 0x62, 0xf6, 0x91, 0xe0, 0x49, 0x2c, 0x8d, 0x3d, 0x0d, 0x3a, + 0x4c, 0xa7, 0xfd, 0xee, 0xd9, 0xd5, 0x79, 0x6e, 0xc4, 0x95, 0x3f, 0x03, 0x0b, 0x2a, 0x79, 0x22, + 0x3c, 0x2a, 0x8d, 0x56, 0x05, 0xc6, 0xa5, 0x11, 0x57, 0x7e, 0xf4, 0x3e, 0x3c, 0x2c, 0x0f, 0x97, + 0x24, 0xa4, 0xd2, 0x68, 0xeb, 0x80, 0x3b, 0xe9, 0xb4, 0x7f, 0x88, 0xeb, 0x0e, 0x3c, 0x8f, 0x43, + 0x1f, 0xc2, 0x5e, 0xc4, 0xa3, 0x12, 0xf2, 0x39, 0xbe, 0x90, 0xc6, 0x0b, 0x3a, 0x54, 0xcf, 0xe8, + 0xe5, 0xbc, 0x0b, 0x2f, 0x62, 0xad, 0xdf, 0x00, 0x6c, 0xfd, 0xe7, 0x76, 0x98, 0xf5, 0x7d, 0x13, + 0x1e, 0xfc, 0xbf, 0x52, 0x6a, 0x2b, 0x25, 0x1b, 0xc3, 0xdd, 0xee, 0x92, 0xed, 0xc7, 0x70, 0xf3, + 0x12, 0xf9, 0x05, 0xc0, 0xce, 0x8e, 0xb6, 0xc7, 0xc3, 0xf9, 0xb2, 0xcd, 0x0d, 0x65, 0xaf, 0xae, + 0xf7, 0x1b, 0x58, 0xde, 0x00, 0xba, 0x0f, 0x3b, 0xe5, 0xc4, 0xeb, 0x6a, 0xbb, 0x55, 0xf6, 0x72, + 0x29, 0xe0, 0x19, 0x02, 0x1d, 0xc3, 0xd6, 0x98, 0x45, 0x43, 0xa3, 0xa9, 0x91, 0x2f, 0x16, 0xc8, + 0xd6, 0x27, 0x2c, 0x1a, 0x62, 0xed, 0xc9, 0x10, 0x11, 0x09, 0xf3, 0x4f, 0x72, 0x0d, 0x91, 0xcd, + 0x3a, 0xd6, 0x1e, 0xeb, 0x57, 0x00, 0xf7, 0x8b, 0xf7, 0x34, 0xe3, 0x03, 0x6b, 0xf9, 0x4e, 0x21, + 0x24, 0x31, 0x7b, 0x42, 0x85, 0x64, 0x3c, 0x2a, 0xf2, 0xce, 0x5e, 0xfa, 0xd9, 0xd5, 0x79, 0xe1, + 0xc1, 0x35, 0xd4, 0xe6, 0x1a, 0x90, 0x03, 0xbb, 0xd9, 0xaf, 0x8c, 0x89, 0x47, 0x8d, 0x96, 0x86, + 0xdd, 0x29, 0x60, 0xdd, 0xcb, 0xd2, 0x81, 0x2b, 0x8c, 0x6b, 0x5f, 0xdf, 0x9a, 0x8d, 0xe7, 0xb7, + 0x66, 0xe3, 0xe6, 0xd6, 0x6c, 0x7c, 0x9b, 0x9a, 0xe0, 0x3a, 0x35, 0xc1, 0xf3, 0xd4, 0x04, 0x37, + 0xa9, 0x09, 0xfe, 0x4c, 0x4d, 0xf0, 0xc3, 0x5f, 0x66, 0xe3, 0x8b, 0x4e, 0x29, 0xfe, 0x3f, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xeb, 0xcc, 0xe2, 0x61, 0x5e, 0x0b, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 950995f4..28a4ae3d 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -29,6 +29,14 @@ import 
"k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +45,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -181,3 +195,4 @@ message Subject { // +optional optional string namespace = 4; } + diff --git a/vendor/k8s.io/api/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go index 9cfd71f4..0c697768 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/register.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index 06fa6ce8..843d998e 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -172,6 +172,20 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index d58a722a..e56cd0f1 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1alpha1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index e4cab832..abbb994f 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
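The hunks above vendor in the new `AggregationRule` type and its `aggregationRule` field on `ClusterRole`. As a hedged illustration only (not part of the vendored patch), the sketch below shows how that field can be populated using exactly the types introduced here; the role name "monitoring" and the label key `rbac.example.com/aggregate-to-monitoring` are made-up example values, and the snippet assumes the vendored `k8s.io/api` and `k8s.io/apimachinery` packages are on the import path.

```
package main

import (
	"fmt"

	rbacv1alpha1 "k8s.io/api/rbac/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A ClusterRole whose Rules are controller-managed: any ClusterRole matching
	// the selector below is aggregated into it, per the field documentation above.
	cr := rbacv1alpha1.ClusterRole{
		ObjectMeta: metav1.ObjectMeta{Name: "monitoring"}, // example name
		AggregationRule: &rbacv1alpha1.AggregationRule{
			ClusterRoleSelectors: []metav1.LabelSelector{
				{MatchLabels: map[string]string{
					// hypothetical aggregation label
					"rbac.example.com/aggregate-to-monitoring": "true",
				}},
			},
		},
	}
	fmt.Println(cr.Name, len(cr.AggregationRule.ClusterRoleSelectors))
}
```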
@@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index bb7d47df..4b77c9c6 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 9cb4935c..8cb2c4be 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -25,6 +25,7 @@ limitations under the License. k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto It has these top-level messages: + AggregationRule ClusterRole ClusterRoleBinding ClusterRoleBindingList @@ -43,6 +44,8 @@ import proto "github.com/gogo/protobuf/proto" import fmt "fmt" import math "math" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + import strings "strings" import reflect "reflect" @@ -59,51 +62,56 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + func (m *ClusterRole) Reset() { *m = ClusterRole{} } func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *PolicyRule) Reset() { *m = PolicyRule{} } func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func (m *Role) Reset() { *m = Role{} } func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } +func (*Role) Descriptor() ([]byte, []int) 
{ return fileDescriptorGenerated, []int{6} } func (m *RoleBinding) Reset() { *m = RoleBinding{} } func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *RoleList) Reset() { *m = RoleList{} } func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RoleRef) Reset() { *m = RoleRef{} } func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *Subject) Reset() { *m = Subject{} } func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") @@ -116,6 +124,36 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, msg := range m.ClusterRoleSelectors { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -151,6 +189,16 @@ func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { i += n } } + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } return i, nil } @@ -172,11 +220,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n2, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n2 + i += n3 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -192,11 +240,11 @@ func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, 
uint64(m.RoleRef.Size())) - n3, err := m.RoleRef.MarshalTo(dAtA[i:]) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n4 return i, nil } @@ -218,11 +266,11 @@ func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n5 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -256,11 +304,11 @@ func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n6 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -387,11 +435,11 @@ func (m *Role) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n7 if len(m.Rules) > 0 { for _, msg := range m.Rules { dAtA[i] = 0x12 @@ -425,11 +473,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n8 if len(m.Subjects) > 0 { for _, msg := range m.Subjects { dAtA[i] = 0x12 @@ -445,11 +493,11 @@ func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n8, err := m.RoleRef.MarshalTo(dAtA[i:]) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n9 return i, nil } @@ -471,11 +519,11 @@ func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n9 + i += n10 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -509,11 +557,11 @@ func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n10 + i += n11 if len(m.Items) > 0 { for _, msg := range m.Items { dAtA[i] = 0x12 @@ -620,6 +668,18 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return offset + 1 } +func (m *AggregationRule) Size() (n int) { + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + func (m *ClusterRole) Size() (n int) { var l int _ = l @@ -631,6 +691,10 @@ func (m *ClusterRole) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -811,6 +875,16 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } +func (this 
*AggregationRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} func (this *ClusterRole) String() string { if this == nil { return "nil" @@ -818,6 +892,7 @@ func (this *ClusterRole) String() string { s := strings.Join([]string{`&ClusterRole{`, `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -948,6 +1023,87 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ClusterRole) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1038,6 +1194,39 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2504,52 +2693,58 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 751 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x94, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xc7, 0xe3, 0x7c, 0x28, 0xf1, 0xe4, 0x46, 0xb9, 0xf5, 0x95, 0xee, 0xb5, 0x2a, 0x5d, 0x27, - 0x0a, 0x2c, 0x2a, 0x95, 0xda, 0xb4, 0x20, 0x60, 0x83, 0x04, 0x66, 0x01, 0x55, 0x4b, 0xa8, 0x06, - 0xc1, 0x02, 0xb1, 0x60, 0xe2, 0x4c, 0xd3, 0x21, 0xf1, 0x87, 0x66, 0xc6, 0x91, 0x2a, 0x36, 0x3c, - 0x00, 0x0b, 0x24, 0x5e, 0x83, 0x15, 0x3b, 0x78, 0x82, 0x2c, 0xbb, 0xec, 0x2a, 0xa2, 0xe6, 0x41, - 0x40, 0x33, 0xb6, 0xe3, 0xa4, 0x69, 0xda, 0xac, 0x22, 0x21, 0xb1, 0x4a, 0xe6, 0x9c, 0xdf, 0xf9, - 0x9f, 0x0f, 0xcf, 0x1c, 0xf0, 0xa0, 0x7f, 0x8f, 0x99, 0xc4, 0xb7, 0xfa, 0x61, 0x07, 0x53, 0x0f, - 0x73, 0xcc, 0xac, 0x21, 0xf6, 0xba, 0x3e, 0xb5, 0x12, 0x07, 0x0a, 0x88, 0x45, 0x3b, 0xc8, 0xb1, - 0x86, 0xdb, 0x1d, 0xcc, 0xd1, 0xb6, 0xd5, 0xc3, 0x1e, 0xa6, 0x88, 0xe3, 0xae, 0x19, 0x50, 0x9f, - 0xfb, 0xda, 0x7f, 0x31, 0x68, 0xa2, 0x80, 0x98, 0x02, 0x34, 0x13, 0x70, 0x7d, 0xab, 0x47, 0xf8, - 0x51, 0xd8, 0x31, 0x1d, 0xdf, 0xb5, 0x7a, 0x7e, 0xcf, 0xb7, 0x24, 0xdf, 0x09, 0x0f, 0xe5, 0x49, - 0x1e, 0xe4, 0xbf, 0x58, 0x67, 0xfd, 0x76, 0x96, 0xd0, 0x45, 0xce, 0x11, 0xf1, 0x30, 0x3d, 0xb6, - 0x82, 0x7e, 0x4f, 0x18, 0x98, 0xe5, 0x62, 0x8e, 0xac, 0xe1, 0x5c, 0xf6, 0x75, 0x6b, 0x51, 0x14, - 0x0d, 0x3d, 0x4e, 0x5c, 0x3c, 0x17, 0x70, 0xe7, 0xaa, 0x00, 0xe6, 0x1c, 0x61, 0x17, 0xcd, 0xc5, - 0xdd, 0x5a, 0x14, 0x17, 0x72, 0x32, 0xb0, 0x88, 0xc7, 0x19, 0xa7, 0xe7, 0x83, 0x5a, 0x5f, 0x15, - 0x50, 0x7d, 0x34, 0x08, 0x19, 0xc7, 0x14, 0xfa, 0x03, 0xac, 0xbd, 0x01, 0x15, 0xd1, 0x48, 0x17, - 0x71, 0xa4, 0x2b, 0x4d, 0x65, 0xa3, 0xba, 0x73, 0xd3, 0xcc, 0xc6, 0x37, 0xd1, 0x35, 0x83, 0x7e, - 0x4f, 0x18, 0x98, 0x29, 0x68, 0x73, 0xb8, 0x6d, 0x3e, 0xeb, 0xbc, 0xc5, 0x0e, 0x7f, 0x8a, 0x39, - 0xb2, 0xb5, 0xd1, 0xb8, 0x91, 0x8b, 0xc6, 0x0d, 0x90, 0xd9, 0xe0, 0x44, 0x55, 0x7b, 0x02, 0x4a, - 0x34, 0x1c, 0x60, 0xa6, 0xe7, 0x9b, 0x85, 0x8d, 0xea, 0xce, 0x35, 0x73, 0xc1, 0xd7, 0x31, 0x0f, - 0xfc, 0x01, 0x71, 0x8e, 0x61, 0x38, 0xc0, 0x76, 0x2d, 0x51, 0x2c, 0x89, 0x13, 0x83, 0xb1, 0x40, - 0xeb, 0x53, 0x1e, 0x68, 0x53, 0xb5, 0xdb, 0xc4, 0xeb, 0x12, 0xaf, 0xb7, 0x82, 0x16, 0xda, 0xa0, - 0xc2, 0x42, 0xe9, 0x48, 0xbb, 0x68, 0x2e, 0xec, 0xe2, 0x79, 0x0c, 0xda, 0x7f, 0x27, 0x8a, 0x95, - 0xc4, 0xc0, 0xe0, 0x44, 0x43, 0xdb, 0x03, 0x65, 0xea, 0x0f, 0x30, 0xc4, 0x87, 0x7a, 0x41, 0x16, - 0xbc, 0x58, 0x0e, 0xc6, 0x9c, 0x5d, 0x4f, 0xe4, 0xca, 0x89, 0x01, 0xa6, 0x0a, 0xad, 0x91, 0x02, - 0xfe, 0x9d, 0x9f, 0xca, 0x3e, 0x61, 0x5c, 0x7b, 0x3d, 0x37, 0x19, 0x73, 0xb9, 0xc9, 0x88, 0x68, - 0x39, 0x97, 0x49, 0x17, 0xa9, 0x65, 0x6a, 0x2a, 0x07, 0xa0, 0x44, 0x38, 0x76, 0xd3, 0x91, 0x6c, - 0x2e, 0xec, 0x61, 0xbe, 0xba, 0xec, 0x03, 0xef, 0x0a, 0x05, 0x18, 0x0b, 0xb5, 0xbe, 0x29, 0xa0, - 0x3e, 0x05, 0xaf, 0xa0, 0x87, 0xdd, 0xd9, 0x1e, 0xae, 0x2f, 0xd5, 0xc3, 0xc5, 0xc5, 0xff, 0x54, - 0x00, 0xc8, 0xae, 0xb0, 0xd6, 0x00, 0xa5, 0x21, 0xa6, 0x1d, 0xa6, 0x2b, 0xcd, 0xc2, 0x86, 0x6a, - 0xab, 0x82, 0x7f, 0x29, 0x0c, 0x30, 0xb6, 0x6b, 0x9b, 0x40, 0x45, 0x01, 0x79, 0x4c, 0xfd, 0x30, - 0x88, 0xd3, 0xab, 0x76, 0x2d, 0x1a, 0x37, 
0xd4, 0x87, 0x07, 0xbb, 0xb1, 0x11, 0x66, 0x7e, 0x01, - 0x53, 0xcc, 0xfc, 0x90, 0x3a, 0x98, 0xe9, 0x85, 0x0c, 0x86, 0xa9, 0x11, 0x66, 0x7e, 0xed, 0x2e, - 0xa8, 0xa5, 0x87, 0x36, 0x72, 0x31, 0xd3, 0x8b, 0x32, 0x60, 0x2d, 0x1a, 0x37, 0x6a, 0x70, 0xda, - 0x01, 0x67, 0x39, 0xed, 0x3e, 0xa8, 0x7b, 0xbe, 0x97, 0x22, 0x2f, 0xe0, 0x3e, 0xd3, 0x4b, 0x32, - 0xf4, 0x9f, 0x68, 0xdc, 0xa8, 0xb7, 0x67, 0x5d, 0xf0, 0x3c, 0xdb, 0xfa, 0xa2, 0x80, 0xe2, 0x6f, - 0xb7, 0x54, 0x3e, 0xe4, 0x41, 0xf5, 0xcf, 0x36, 0x99, 0x6c, 0x13, 0xf1, 0x04, 0x57, 0xbb, 0x46, - 0x96, 0x7e, 0x82, 0x57, 0xef, 0x8f, 0xcf, 0x0a, 0xa8, 0xac, 0x68, 0x71, 0xd8, 0xb3, 0x55, 0xff, - 0x7f, 0x79, 0xd5, 0x17, 0x97, 0xfb, 0x0e, 0xa4, 0xf3, 0xd7, 0x6e, 0x80, 0x4a, 0xfa, 0xd8, 0x65, - 0xb1, 0x6a, 0x96, 0x3c, 0xdd, 0x07, 0x70, 0x42, 0x68, 0x4d, 0x50, 0xec, 0x13, 0xaf, 0xab, 0xe7, - 0x25, 0xf9, 0x57, 0x42, 0x16, 0xf7, 0x88, 0xd7, 0x85, 0xd2, 0x23, 0x08, 0x0f, 0xb9, 0x58, 0x5e, - 0x88, 0x29, 0x42, 0x3c, 0x73, 0x28, 0x3d, 0x62, 0x56, 0xe5, 0xe4, 0x32, 0x4d, 0xf4, 0x94, 0x85, - 0x7a, 0xd3, 0xf5, 0xe5, 0x97, 0xa9, 0xef, 0xf2, 0xec, 0x9a, 0x05, 0x54, 0xf1, 0xcb, 0x02, 0xe4, - 0x60, 0xbd, 0x28, 0xb1, 0xb5, 0x04, 0x53, 0xdb, 0xa9, 0x03, 0x66, 0x8c, 0xbd, 0x35, 0x3a, 0x33, - 0x72, 0x27, 0x67, 0x46, 0xee, 0xf4, 0xcc, 0xc8, 0xbd, 0x8f, 0x0c, 0x65, 0x14, 0x19, 0xca, 0x49, - 0x64, 0x28, 0xa7, 0x91, 0xa1, 0x7c, 0x8f, 0x0c, 0xe5, 0xe3, 0x0f, 0x23, 0xf7, 0xaa, 0x9c, 0x4c, - 0xfd, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0x24, 0x6a, 0xfa, 0x45, 0x0a, 0x00, 0x00, + // 833 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x6d, 0x3c, 0xcb, 0x2a, 0xdc, 0x70, 0x02, 0x6b, 0x05, 0xce, 0x2a, 0x50, + 0x44, 0x3a, 0xce, 0x66, 0xef, 0x10, 0xd0, 0x20, 0x71, 0xa6, 0x80, 0xd5, 0x2d, 0x61, 0x35, 0x27, + 0x28, 0x10, 0x05, 0x63, 0x67, 0xce, 0x19, 0xe2, 0x5f, 0x9a, 0x19, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x51, 0x20, 0x51, 0xd1, 0x52, 0x53, 0x51, 0xf2, 0x17, 0xa4, 0xbc, 0xf2, 0xaa, 0x88, 0x35, 0x7f, + 0x08, 0x68, 0xfc, 0x23, 0x4e, 0xe2, 0xf8, 0x2e, 0x55, 0x24, 0xa4, 0xab, 0x76, 0xe7, 0xbd, 0xef, + 0x7d, 0xef, 0x7b, 0x9f, 0x67, 0x5e, 0xe0, 0x27, 0xb3, 0x8f, 0x84, 0xc9, 0x22, 0x6b, 0x96, 0x38, + 0x94, 0x87, 0x54, 0x52, 0x61, 0xcd, 0x69, 0x38, 0x89, 0xb8, 0x55, 0x24, 0x48, 0xcc, 0x2c, 0xee, + 0x10, 0xd7, 0x9a, 0x5f, 0x38, 0x54, 0x92, 0x0b, 0xcb, 0xa3, 0x21, 0xe5, 0x44, 0xd2, 0x89, 0x19, + 0xf3, 0x48, 0x46, 0xe8, 0x8d, 0x1c, 0x68, 0x92, 0x98, 0x99, 0x0a, 0x68, 0x16, 0xc0, 0xb3, 0xbb, + 0x1e, 0x93, 0xd3, 0xc4, 0x31, 0xdd, 0x28, 0xb0, 0xbc, 0xc8, 0x8b, 0xac, 0x0c, 0xef, 0x24, 0x8f, + 0xb3, 0x53, 0x76, 0xc8, 0xfe, 0xcb, 0x79, 0xce, 0x46, 0xf5, 0x86, 0xc4, 0x8f, 0xa7, 0xf5, 0x8e, + 0x67, 0xef, 0x57, 0xc8, 0x80, 0xb8, 0x53, 0x16, 0x52, 0xfe, 0xc4, 0x8a, 0x67, 0x9e, 0x0a, 0x08, + 0x2b, 0xa0, 0x92, 0x58, 0xf3, 0x7a, 0x95, 0xd5, 0x54, 0xc5, 0x93, 0x50, 0xb2, 0x80, 0xd6, 0x0a, + 0x3e, 0x78, 0x51, 0x81, 0x70, 0xa7, 0x34, 0x20, 0xb5, 0xba, 0xfb, 0x4d, 0x75, 0x89, 0x64, 0xbe, + 0xc5, 0x42, 0x29, 0x24, 0xdf, 0x2e, 0x1a, 0xfe, 0x06, 0x60, 0xff, 0x81, 0xe7, 0x71, 0xea, 0x11, + 0xc9, 0xa2, 0x10, 0x27, 0x3e, 0x45, 0x3f, 0x01, 0x78, 0xdb, 0xf5, 0x13, 0x21, 0x29, 0xc7, 0x91, + 0x4f, 0x1f, 0x51, 0x9f, 0xba, 0x32, 0xe2, 0x42, 0x07, 0xe7, 0x47, 0xa3, 0x93, 0x7b, 0xf7, 0xcd, + 0xca, 0xf9, 0x55, 0x23, 0x33, 0x9e, 0x79, 0x2a, 0x20, 0x4c, 0xe5, 0x83, 0x39, 0xbf, 0x30, 0xaf, + 0x88, 0x43, 0xfd, 0xb2, 0xd6, 0x7e, 0x73, 0xb1, 0x1c, 0xb4, 0xd2, 0xe5, 0xe0, 0xf6, 0xa7, 0x3b, + 0x88, 0xf1, 0xce, 0x76, 
0xc3, 0xdf, 0xdb, 0xf0, 0x64, 0x0d, 0x8e, 0xbe, 0x83, 0x3d, 0x45, 0x3e, + 0x21, 0x92, 0xe8, 0xe0, 0x1c, 0x8c, 0x4e, 0xee, 0xbd, 0xb7, 0x9f, 0x94, 0x2f, 0x9d, 0xef, 0xa9, + 0x2b, 0xbf, 0xa0, 0x92, 0xd8, 0xa8, 0xd0, 0x01, 0xab, 0x18, 0x5e, 0xb1, 0xa2, 0xcf, 0x61, 0x97, + 0x27, 0x3e, 0x15, 0x7a, 0x3b, 0x9b, 0xf4, 0x6d, 0xb3, 0xe1, 0x8e, 0x99, 0xd7, 0x91, 0xcf, 0xdc, + 0x27, 0xca, 0x2d, 0xfb, 0xb4, 0x60, 0xec, 0xaa, 0x93, 0xc0, 0x39, 0x01, 0xf2, 0x60, 0x9f, 0x6c, + 0xda, 0xaa, 0x1f, 0x65, 0x92, 0x47, 0x8d, 0x9c, 0x5b, 0x9f, 0xc1, 0x7e, 0x2d, 0x5d, 0x0e, 0xb6, + 0xbf, 0x0d, 0xde, 0x66, 0x1d, 0xfe, 0xda, 0x86, 0x68, 0xcd, 0x24, 0x9b, 0x85, 0x13, 0x16, 0x7a, + 0x07, 0xf0, 0x6a, 0x0c, 0x7b, 0x22, 0xc9, 0x12, 0xa5, 0x5d, 0xe7, 0x8d, 0xa3, 0x3d, 0xca, 0x81, + 0xf6, 0xab, 0x05, 0x63, 0xaf, 0x08, 0x08, 0xbc, 0xe2, 0x40, 0x0f, 0xe1, 0x31, 0x8f, 0x7c, 0x8a, + 0xe9, 0xe3, 0xc2, 0xa9, 0x66, 0x3a, 0x9c, 0xe3, 0xec, 0x7e, 0x41, 0x77, 0x5c, 0x04, 0x70, 0xc9, + 0x30, 0x5c, 0x00, 0xf8, 0x7a, 0xdd, 0x95, 0x2b, 0x26, 0x24, 0xfa, 0xb6, 0xe6, 0x8c, 0xb9, 0xe7, + 0x85, 0x66, 0x22, 0xf7, 0x65, 0x35, 0x45, 0x19, 0x59, 0x73, 0xe5, 0x1a, 0x76, 0x99, 0xa4, 0x41, + 0x69, 0xc9, 0x9d, 0xc6, 0x19, 0xea, 0xea, 0xaa, 0x9b, 0x74, 0xa9, 0x18, 0x70, 0x4e, 0x34, 0xfc, + 0x0b, 0xc0, 0xfe, 0x1a, 0xf8, 0x00, 0x33, 0x5c, 0x6e, 0xce, 0xf0, 0xce, 0x5e, 0x33, 0xec, 0x16, + 0xff, 0x2f, 0x80, 0xb0, 0x7a, 0x2b, 0x68, 0x00, 0xbb, 0x73, 0xca, 0x9d, 0x7c, 0x93, 0x68, 0xb6, + 0xa6, 0xf0, 0x5f, 0xab, 0x00, 0xce, 0xe3, 0xe8, 0x0e, 0xd4, 0x48, 0xcc, 0x3e, 0xe3, 0x51, 0x12, + 0xe7, 0xed, 0x35, 0xfb, 0x34, 0x5d, 0x0e, 0xb4, 0x07, 0xd7, 0x97, 0x79, 0x10, 0x57, 0x79, 0x05, + 0xe6, 0x54, 0x44, 0x09, 0x77, 0xa9, 0xd0, 0x8f, 0x2a, 0x30, 0x2e, 0x83, 0xb8, 0xca, 0xa3, 0x0f, + 0xe1, 0x69, 0x79, 0x18, 0x93, 0x80, 0x0a, 0xbd, 0x93, 0x15, 0xdc, 0x4a, 0x97, 0x83, 0x53, 0xbc, + 0x9e, 0xc0, 0x9b, 0x38, 0xf4, 0x31, 0xec, 0x87, 0x51, 0x58, 0x42, 0xbe, 0xc2, 0x57, 0x42, 0xef, + 0x66, 0xa5, 0xd9, 0xfb, 0x1c, 0x6f, 0xa6, 0xf0, 0x36, 0x76, 0xf8, 0x27, 0x80, 0x9d, 0xff, 0xdb, + 0xf6, 0x1a, 0xfe, 0xdc, 0x86, 0x27, 0x2f, 0xb7, 0xc9, 0x6a, 0x9b, 0xa8, 0x27, 0x78, 0xd8, 0x35, + 0xb2, 0xf7, 0x13, 0x7c, 0xf1, 0xfe, 0xf8, 0x03, 0xc0, 0xde, 0x81, 0x16, 0x87, 0xbd, 0xa9, 0xfa, + 0xad, 0xe7, 0xab, 0xde, 0x2d, 0xf7, 0x07, 0x58, 0xfa, 0x8f, 0xde, 0x85, 0xbd, 0xf2, 0xb1, 0x67, + 0x62, 0xb5, 0xaa, 0x79, 0xb9, 0x0f, 0xf0, 0x0a, 0x81, 0xce, 0x61, 0x67, 0xc6, 0xc2, 0x89, 0xde, + 0xce, 0x90, 0xaf, 0x14, 0xc8, 0xce, 0x43, 0x16, 0x4e, 0x70, 0x96, 0x51, 0x88, 0x90, 0x04, 0xf9, + 0x0f, 0xf1, 0x1a, 0x42, 0x3d, 0x73, 0x9c, 0x65, 0x94, 0x57, 0xc7, 0xc5, 0x65, 0x5a, 0xf1, 0x81, + 0x46, 0xbe, 0x75, 0x7d, 0xed, 0x7d, 0xf4, 0x3d, 0xbf, 0x3b, 0xb2, 0xa0, 0xa6, 0xfe, 0x8a, 0x98, + 0xb8, 0x54, 0xef, 0x64, 0xb0, 0x5b, 0x05, 0x4c, 0x1b, 0x97, 0x09, 0x5c, 0x61, 0xec, 0xbb, 0x8b, + 0x1b, 0xa3, 0xf5, 0xf4, 0xc6, 0x68, 0x3d, 0xbb, 0x31, 0x5a, 0x3f, 0xa6, 0x06, 0x58, 0xa4, 0x06, + 0x78, 0x9a, 0x1a, 0xe0, 0x59, 0x6a, 0x80, 0xbf, 0x53, 0x03, 0xfc, 0xf2, 0x8f, 0xd1, 0xfa, 0xe6, + 0xb8, 0x70, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd1, 0x99, 0xaf, 0xff, 0x74, 0x0b, 0x00, + 0x00, } diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index fe92c3c0..975de109 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -21,6 +21,7 @@ syntax = 'proto2'; package k8s.io.api.rbac.v1beta1; +import "k8s.io/api/rbac/v1alpha1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; 
import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,6 +30,14 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. message ClusterRole { // Standard object's metadata. @@ -37,6 +46,12 @@ message ClusterRole { // Rules holds all the PolicyRules for this ClusterRole repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; } // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, @@ -180,3 +195,4 @@ message Subject { // +optional optional string namespace = 4; } + diff --git a/vendor/k8s.io/api/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go index 8f60f019..c8526a65 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/register.go +++ b/vendor/k8s.io/api/rbac/v1beta1/register.go @@ -40,7 +40,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &Role{}, diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index ee3964a3..091fc1dc 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -171,6 +171,19 @@ type ClusterRole struct { // Rules holds all the PolicyRules for this ClusterRole Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
+ // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` } // +genclient diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 16a265c5..6180d6d4 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -27,10 +27,20 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", } func (ClusterRole) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 92272764..ac238956 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -21,66 +21,31 @@ limitations under the License. package v1beta1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return } -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRole).DeepCopyInto(out.(*ClusterRole)) - return nil - }, InType: reflect.TypeOf(&ClusterRole{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBinding).DeepCopyInto(out.(*ClusterRoleBinding)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleBindingList).DeepCopyInto(out.(*ClusterRoleBindingList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClusterRoleList).DeepCopyInto(out.(*ClusterRoleList)) - return nil - }, InType: reflect.TypeOf(&ClusterRoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PolicyRule).DeepCopyInto(out.(*PolicyRule)) - return nil - }, InType: reflect.TypeOf(&PolicyRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Role).DeepCopyInto(out.(*Role)) - return nil - }, InType: reflect.TypeOf(&Role{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBinding).DeepCopyInto(out.(*RoleBinding)) - return nil - }, InType: reflect.TypeOf(&RoleBinding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleBindingList).DeepCopyInto(out.(*RoleBindingList)) - return nil - }, InType: reflect.TypeOf(&RoleBindingList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleList).DeepCopyInto(out.(*RoleList)) - return nil - }, InType: reflect.TypeOf(&RoleList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RoleRef).DeepCopyInto(out.(*RoleRef)) - return nil - }, InType: reflect.TypeOf(&RoleRef{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Subject).DeepCopyInto(out.(*Subject)) - return nil - }, InType: reflect.TypeOf(&Subject{})}, - ) +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
@@ -95,6 +60,15 @@ func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + if *in == nil { + *out = nil + } else { + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index bf901598..e10d07ff 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 0f51edaf..625cae7b 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -62,3 +62,4 @@ message PriorityClassList { // items is the list of PriorityClasses repeated PriorityClass items = 2; } + diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go index 91ce6e0c..24689f0a 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/register.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PriorityClass{}, diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index a3ae5c0f..fad4db66 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,32 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClass).DeepCopyInto(out.(*PriorityClass)) - return nil - }, InType: reflect.TypeOf(&PriorityClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PriorityClassList).DeepCopyInto(out.(*PriorityClassList)) - return nil - }, InType: reflect.TypeOf(&PriorityClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index ae1bfb91..05a62c56 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto index 5550d9a2..430319d7 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -73,3 +73,4 @@ message PodPresetSpec { // +optional repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; } + diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go index 2f39af0f..eee278d9 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ b/vendor/k8s.io/api/settings/v1alpha1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &PodPreset{}, diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index 07e6ab72..2c925622 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -22,36 +22,9 @@ package v1alpha1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPreset).DeepCopyInto(out.(*PodPreset)) - return nil - }, InType: reflect.TypeOf(&PodPreset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetList).DeepCopyInto(out.(*PodPresetList)) - return nil - }, InType: reflect.TypeOf(&PodPresetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPresetSpec).DeepCopyInto(out.(*PodPresetSpec)) - return nil - }, InType: reflect.TypeOf(&PodPresetSpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodPreset) DeepCopyInto(out *PodPreset) { *out = *in diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 0672c58e..8f4a4045 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 4befedff..7157b72f 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -146,6 +146,12 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } return i, nil } @@ -242,6 +248,10 @@ func (m *StorageClass) Size() (n int) { if m.AllowVolumeExpansion != nil { n += 2 } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -293,6 +303,7 @@ func (this *StorageClass) String() string { `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, `}`, }, "") return s @@ -600,6 +611,36 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -842,43 +883,44 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 593 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x6f, 0xd3, 0x3e, - 0x18, 0xc6, 0x9b, 0x76, 0xfd, 0x6b, 0x73, 0x37, 0xfd, 0xab, 0x30, 0xa4, 0xa8, 0x87, 0xb4, 0x1a, - 0x97, 0x0a, 0x09, 0x7b, 0xdd, 0x06, 0x9a, 0x90, 0x40, 0xa2, 0x68, 0x12, 0x48, 0x9b, 0x56, 0x05, - 0x89, 0x03, 0xe2, 0x80, 0x9b, 0xbd, 0x64, 0x26, 0x89, 0x1d, 0xd9, 0x4e, 0xa0, 0x37, 0x3e, 0x02, - 0x9f, 0x87, 0x13, 0xc7, 0x1d, 0x77, 0xdc, 0x29, 0x62, 0xe1, 0x5b, 0x70, 0x42, 0x49, 0xca, 0x92, - 0xad, 0x9d, 0xd8, 0xcd, 0x7e, 0xdf, 0xe7, 0xf7, 0xd8, 0x7e, 0xfd, 0xa0, 0xe7, 0xfe, 0xbe, 0xc2, - 0x4c, 0x10, 0x3f, 0x9e, 0x82, 0xe4, 0xa0, 0x41, 0x91, 0x04, 0xf8, 0x89, 0x90, 0x64, 0xde, 0xa0, - 0x11, 0x23, 0x4a, 0x0b, 0x49, 0x3d, 0x20, 0xc9, 0x88, 0x78, 0xc0, 0x41, 0x52, 0x0d, 0x27, 0x38, - 0x92, 0x42, 0x0b, 0xf3, 0x7e, 0x29, 0xc3, 0x34, 0x62, 0x78, 0x2e, 0xc3, 0xc9, 0xa8, 0xf7, 0xc8, - 0x63, 0xfa, 0x34, 0x9e, 0x62, 0x57, 0x84, 0xc4, 0x13, 0x9e, 0x20, 0x85, 0x7a, 0x1a, 0x7f, 0x2c, - 0x76, 0xc5, 0xa6, 0x58, 0x95, 0x2e, 0xbd, 0x87, 0x4b, 0x0f, 0x9b, 0x82, 0xa6, 0x0b, 0x27, 0xf6, - 0xf6, 0x2a, 0x6d, 0x48, 0xdd, 0x53, 0xc6, 0x41, 0xce, 0x48, 0xe4, 0x7b, 0x79, 0x41, 0x91, 0x10, - 0x34, 0x5d, 0x72, 0xcf, 0x1e, 0xb9, 0x8d, 0x92, 0x31, 0xd7, 0x2c, 0x84, 0x05, 0xe0, 0xc9, 0xbf, - 0x00, 0xe5, 0x9e, 0x42, 
0x48, 0x17, 0xb8, 0xdd, 0xdb, 0xb8, 0x58, 0xb3, 0x80, 0x30, 0xae, 0x95, - 0x96, 0x37, 0xa1, 0xad, 0x1f, 0x2b, 0x68, 0xfd, 0x4d, 0xf9, 0xee, 0x97, 0x01, 0x55, 0xca, 0xfc, - 0x80, 0x56, 0xf3, 0x97, 0x9c, 0x50, 0x4d, 0x2d, 0x63, 0x60, 0x0c, 0x3b, 0x3b, 0xdb, 0xb8, 0x9a, - 0xf4, 0x95, 0x31, 0x8e, 0x7c, 0x2f, 0x2f, 0x28, 0x9c, 0xab, 0x71, 0x32, 0xc2, 0xc7, 0xd3, 0x4f, - 0xe0, 0xea, 0x23, 0xd0, 0x74, 0x6c, 0x9e, 0xa5, 0xfd, 0x46, 0x96, 0xf6, 0x51, 0x55, 0x73, 0xae, - 0x5c, 0xcd, 0xc7, 0xa8, 0x13, 0x49, 0x91, 0x30, 0xc5, 0x04, 0x07, 0x69, 0x35, 0x07, 0xc6, 0x70, - 0x6d, 0x7c, 0x6f, 0x8e, 0x74, 0x26, 0x55, 0xcb, 0xa9, 0xeb, 0x4c, 0x0f, 0xa1, 0x88, 0x4a, 0x1a, - 0x82, 0x06, 0xa9, 0xac, 0xd6, 0xa0, 0x35, 0xec, 0xec, 0xec, 0xe2, 0xa5, 0x21, 0xc0, 0xf5, 0x17, - 0xe1, 0xc9, 0x15, 0x75, 0xc0, 0xb5, 0x9c, 0x55, 0xb7, 0xab, 0x1a, 0x4e, 0xcd, 0xda, 0xf4, 0xd1, - 0x86, 0x04, 0x37, 0xa0, 0x2c, 0x9c, 0x88, 0x80, 0xb9, 0x33, 0x6b, 0xa5, 0xb8, 0xe1, 0x41, 0x96, - 0xf6, 0x37, 0x9c, 0x7a, 0xe3, 0x77, 0xda, 0xdf, 0xae, 0xc5, 0xc7, 0x15, 0x32, 0xcf, 0x0e, 0x9e, - 0x80, 0x54, 0x4c, 0x69, 0xe0, 0xfa, 0xad, 0x08, 0xe2, 0x10, 0xae, 0x31, 0xce, 0x75, 0x6f, 0x73, - 0x0f, 0xad, 0x87, 0x22, 0xe6, 0xfa, 0x38, 0xd2, 0x4c, 0x70, 0x65, 0xb5, 0x07, 0xad, 0xe1, 0xda, - 0xb8, 0x9b, 0xa5, 0xfd, 0xf5, 0xa3, 0x5a, 0xdd, 0xb9, 0xa6, 0x32, 0x0f, 0xd1, 0x26, 0x0d, 0x02, - 0xf1, 0xb9, 0x3c, 0xe0, 0xe0, 0x4b, 0x44, 0x79, 0x3e, 0x25, 0xeb, 0xbf, 0x81, 0x31, 0x5c, 0x1d, - 0x5b, 0x59, 0xda, 0xdf, 0x7c, 0xb1, 0xa4, 0xef, 0x2c, 0xa5, 0x7a, 0xcf, 0xd0, 0xff, 0x37, 0x66, - 0x64, 0x76, 0x51, 0xcb, 0x87, 0x59, 0x11, 0x80, 0x35, 0x27, 0x5f, 0x9a, 0x9b, 0xa8, 0x9d, 0xd0, - 0x20, 0x86, 0xf2, 0xbf, 0x9c, 0x72, 0xf3, 0xb4, 0xb9, 0x6f, 0x6c, 0x7d, 0x37, 0x50, 0xb7, 0x3e, - 0xf0, 0x43, 0xa6, 0xb4, 0xf9, 0x7e, 0x21, 0x46, 0xf8, 0x6e, 0x31, 0xca, 0xe9, 0x22, 0x44, 0xdd, - 0xf9, 0x37, 0xad, 0xfe, 0xad, 0xd4, 0x22, 0xf4, 0x0a, 0xb5, 0x99, 0x86, 0x50, 0x59, 0xcd, 0x22, - 0x06, 0x0f, 0xee, 0x10, 0x83, 0xf1, 0xc6, 0xdc, 0xaf, 0xfd, 0x3a, 0x27, 0x9d, 0xd2, 0x60, 0x3c, - 0x3c, 0xbb, 0xb4, 0x1b, 0xe7, 0x97, 0x76, 0xe3, 0xe2, 0xd2, 0x6e, 0x7c, 0xcd, 0x6c, 0xe3, 0x2c, - 0xb3, 0x8d, 0xf3, 0xcc, 0x36, 0x2e, 0x32, 0xdb, 0xf8, 0x99, 0xd9, 0xc6, 0xb7, 0x5f, 0x76, 0xe3, - 0x5d, 0x33, 0x19, 0xfd, 0x09, 0x00, 0x00, 0xff, 0xff, 0xbb, 0x57, 0xe7, 0x15, 0xb0, 0x04, 0x00, - 0x00, + // 623 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x52, 0x4f, 0x6f, 0xd3, 0x3c, + 0x18, 0x6f, 0xda, 0xb7, 0x2f, 0x9b, 0xbb, 0x89, 0x2e, 0x0c, 0x29, 0xea, 0x21, 0xa9, 0xc6, 0xa5, + 0x9a, 0x84, 0xb3, 0x6e, 0x03, 0x4d, 0x48, 0x20, 0x11, 0x34, 0x09, 0xa4, 0x4d, 0xab, 0x82, 0x34, + 0x21, 0xc4, 0x01, 0x37, 0x7d, 0xc8, 0x4c, 0x13, 0x3b, 0xb2, 0x9d, 0x40, 0x6f, 0x7c, 0x04, 0xce, + 0x7c, 0x14, 0x3e, 0xc1, 0x8e, 0x3b, 0xee, 0x14, 0xb1, 0xf0, 0x2d, 0x76, 0x42, 0x49, 0xca, 0x9a, + 0xad, 0x9d, 0xd8, 0x2d, 0xfe, 0xfd, 0xb3, 0x9f, 0x27, 0x3f, 0xf4, 0x62, 0xbc, 0x27, 0x31, 0xe5, + 0xf6, 0x38, 0x1e, 0x82, 0x60, 0xa0, 0x40, 0xda, 0x09, 0xb0, 0x11, 0x17, 0xf6, 0x94, 0x20, 0x11, + 0xb5, 0xa5, 0xe2, 0x82, 0xf8, 0x60, 0x27, 0x7d, 0xdb, 0x07, 0x06, 0x82, 0x28, 0x18, 0xe1, 0x48, + 0x70, 0xc5, 0xf5, 0x87, 0xa5, 0x0c, 0x93, 0x88, 0xe2, 0xa9, 0x0c, 0x27, 0xfd, 0xce, 0x63, 0x9f, + 0xaa, 0x93, 0x78, 0x88, 0x3d, 0x1e, 0xda, 0x3e, 0xf7, 0xb9, 0x5d, 0xa8, 0x87, 0xf1, 0xa7, 0xe2, + 0x54, 0x1c, 0x8a, 0xaf, 0x32, 0xa5, 0xb3, 0xb9, 0xf0, 0xb2, 0x21, 0x28, 0x32, 0x77, 0x63, 0x67, + 0x77, 0xa6, 0x0d, 0x89, 0x77, 0x42, 0x19, 0x88, 0x89, 0x1d, 0x8d, 0xfd, 0x1c, 0x90, 0x76, 
0x08, + 0x8a, 0x2c, 0x78, 0x67, 0xc7, 0xbe, 0xcd, 0x25, 0x62, 0xa6, 0x68, 0x08, 0x73, 0x86, 0xa7, 0xff, + 0x32, 0x48, 0xef, 0x04, 0x42, 0x32, 0xe7, 0xdb, 0xb9, 0xcd, 0x17, 0x2b, 0x1a, 0xd8, 0x94, 0x29, + 0xa9, 0xc4, 0x4d, 0xd3, 0xc6, 0x8f, 0x26, 0x5a, 0x79, 0x5b, 0xce, 0xfd, 0x2a, 0x20, 0x52, 0xea, + 0x1f, 0xd1, 0x52, 0x3e, 0xc9, 0x88, 0x28, 0x62, 0x68, 0x5d, 0xad, 0xd7, 0xda, 0xde, 0xc2, 0xb3, + 0x4d, 0x5f, 0x05, 0xe3, 0x68, 0xec, 0xe7, 0x80, 0xc4, 0xb9, 0x1a, 0x27, 0x7d, 0x7c, 0x34, 0xfc, + 0x0c, 0x9e, 0x3a, 0x04, 0x45, 0x1c, 0xfd, 0x34, 0xb5, 0x6a, 0x59, 0x6a, 0xa1, 0x19, 0xe6, 0x5e, + 0xa5, 0xea, 0x4f, 0x50, 0x2b, 0x12, 0x3c, 0xa1, 0x92, 0x72, 0x06, 0xc2, 0xa8, 0x77, 0xb5, 0xde, + 0xb2, 0xf3, 0x60, 0x6a, 0x69, 0x0d, 0x66, 0x94, 0x5b, 0xd5, 0xe9, 0x3e, 0x42, 0x11, 0x11, 0x24, + 0x04, 0x05, 0x42, 0x1a, 0x8d, 0x6e, 0xa3, 0xd7, 0xda, 0xde, 0xc1, 0x0b, 0x4b, 0x80, 0xab, 0x13, + 0xe1, 0xc1, 0x95, 0x6b, 0x9f, 0x29, 0x31, 0x99, 0xbd, 0x6e, 0x46, 0xb8, 0x95, 0x68, 0x7d, 0x8c, + 0x56, 0x05, 0x78, 0x01, 0xa1, 0xe1, 0x80, 0x07, 0xd4, 0x9b, 0x18, 0xff, 0x15, 0x2f, 0xdc, 0xcf, + 0x52, 0x6b, 0xd5, 0xad, 0x12, 0x97, 0xa9, 0xb5, 0x55, 0xa9, 0x8f, 0xc7, 0x45, 0xde, 0x1d, 0x3c, + 0x00, 0x21, 0xa9, 0x54, 0xc0, 0xd4, 0x31, 0x0f, 0xe2, 0x10, 0xae, 0x79, 0xdc, 0xeb, 0xd9, 0xfa, + 0x2e, 0x5a, 0x09, 0x79, 0xcc, 0xd4, 0x51, 0xa4, 0x28, 0x67, 0xd2, 0x68, 0x76, 0x1b, 0xbd, 0x65, + 0xa7, 0x9d, 0xa5, 0xd6, 0xca, 0x61, 0x05, 0x77, 0xaf, 0xa9, 0xf4, 0x03, 0xb4, 0x4e, 0x82, 0x80, + 0x7f, 0x29, 0x2f, 0xd8, 0xff, 0x1a, 0x11, 0x96, 0x6f, 0xc9, 0xf8, 0xbf, 0xab, 0xf5, 0x96, 0x1c, + 0x23, 0x4b, 0xad, 0xf5, 0x97, 0x0b, 0x78, 0x77, 0xa1, 0x4b, 0x7f, 0x87, 0xd6, 0x92, 0x02, 0x72, + 0x28, 0x1b, 0x51, 0xe6, 0x1f, 0xf2, 0x11, 0x18, 0xf7, 0x8a, 0xa1, 0x37, 0xb3, 0xd4, 0x5a, 0x3b, + 0xbe, 0x49, 0x5e, 0x2e, 0x02, 0xdd, 0xf9, 0x90, 0xce, 0x73, 0x74, 0xff, 0xc6, 0xf6, 0xf5, 0x36, + 0x6a, 0x8c, 0x61, 0x52, 0x54, 0x6b, 0xd9, 0xcd, 0x3f, 0xf5, 0x75, 0xd4, 0x4c, 0x48, 0x10, 0x43, + 0xd9, 0x04, 0xb7, 0x3c, 0x3c, 0xab, 0xef, 0x69, 0x1b, 0x3f, 0x35, 0xd4, 0xae, 0xfe, 0xca, 0x03, + 0x2a, 0x95, 0xfe, 0x61, 0xae, 0xa0, 0xf8, 0x6e, 0x05, 0xcd, 0xdd, 0x45, 0x3d, 0xdb, 0xd3, 0x02, + 0x2c, 0xfd, 0x45, 0x2a, 0xe5, 0x7c, 0x8d, 0x9a, 0x54, 0x41, 0x28, 0x8d, 0x7a, 0x51, 0xb0, 0x47, + 0x77, 0x28, 0x98, 0xb3, 0x3a, 0xcd, 0x6b, 0xbe, 0xc9, 0x9d, 0x6e, 0x19, 0xe0, 0xf4, 0x4e, 0x2f, + 0xcc, 0xda, 0xd9, 0x85, 0x59, 0x3b, 0xbf, 0x30, 0x6b, 0xdf, 0x32, 0x53, 0x3b, 0xcd, 0x4c, 0xed, + 0x2c, 0x33, 0xb5, 0xf3, 0xcc, 0xd4, 0x7e, 0x65, 0xa6, 0xf6, 0xfd, 0xb7, 0x59, 0x7b, 0x5f, 0x4f, + 0xfa, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xee, 0x56, 0xcc, 0xfd, 0x0a, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index 7556e090..939ebde6 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -63,6 +63,13 @@ message StorageClass { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. 
+ // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; } // StorageClassList is a collection of storage classes. @@ -75,3 +82,4 @@ message StorageClassList { // Items is the list of StorageClasses repeated StorageClass items = 2; } + diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 59c4e59b..c058add8 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 9afdafb6..288d40ab 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -59,6 +59,13 @@ type StorageClass struct { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -74,3 +81,18 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PersistentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduling. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index b4be857d..3eb9bdab 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_StorageClass = map[string]string{ "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. 
When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index 50c707ab..a2b2f8e7 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -22,32 +22,9 @@ package v1 import ( core_v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go new file mode 100644 index 00000000..aa94aff7 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package,register +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true +package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go new file mode 100644 index 00000000..3d55c6ec --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -0,0 +1,1523 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto + + It has these top-level messages: + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } + +func init() { + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") +} 
+func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n5, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } 
+ github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n6, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n7, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n8, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k 
+ _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", 
"VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := 
len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + 
default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x3d, 0x6f, 0xdb, 0x46, + 0x18, 0xc7, 0x45, 0x49, 0xb6, 0xe5, 0x53, 0x5f, 0x8c, 0x83, 0xd0, 0x0a, 0x2a, 0x40, 0x19, 0x9a, + 0xdc, 0xa2, 0x3e, 0x56, 0x76, 0x51, 0x18, 0xdd, 0x44, 0xd8, 0x43, 0x51, 0xcb, 0x2d, 0xe8, 0xa2, + 0x43, 0xdb, 0xa1, 0x27, 0xf2, 0x31, 0x45, 0x4b, 0x7c, 0xc1, 0xdd, 0x51, 0x88, 0xb7, 0x4c, 0x99, + 0xb3, 0xe5, 0x1b, 0xe4, 0xb3, 0x68, 0x8b, 0x47, 0x4f, 0x42, 0xcc, 0x7c, 0x8b, 0x2c, 0x09, 0x78, + 0x3c, 0x89, 0xb2, 0x29, 0x25, 0xb6, 0x37, 0x3e, 0xcf, 0x3d, 0xff, 0xdf, 0xf3, 0x76, 0x47, 0x74, + 0x3c, 0x3a, 0xe2, 0xc4, 0x0b, 0x8d, 0x51, 0x3c, 0x00, 0x16, 0x80, 0x00, 0x6e, 0x4c, 0x20, 0x70, + 0x42, 0x66, 0xa8, 0x03, 0x1a, 0x79, 0x06, 0x17, 0x21, 0xa3, 0x2e, 0x18, 0x93, 0x2e, 0x1d, 0x47, + 0x43, 0xda, 0x35, 0x5c, 0x08, 0x80, 0x51, 0x01, 0x0e, 0x89, 0x58, 0x28, 0x42, 0xfc, 0x5d, 0x16, + 0x4c, 0x68, 0xe4, 0x11, 0x15, 0x4c, 0xe6, 0xc1, 0xad, 0x7d, 0xd7, 0x13, 0xc3, 0x78, 0x40, 0xec, + 0xd0, 0x37, 0xdc, 0xd0, 0x0d, 0x0d, 0xa9, 0x19, 0xc4, 0x17, 0xd2, 0x92, 
0x86, 0xfc, 0xca, 0x58, + 0xad, 0x7e, 0x9e, 0x18, 0x9e, 0x09, 0x08, 0xb8, 0x17, 0x06, 0x7c, 0x9f, 0x46, 0x1e, 0x07, 0x36, + 0x01, 0x66, 0x44, 0x23, 0x37, 0x3d, 0xe3, 0x77, 0x03, 0x8c, 0x49, 0x77, 0x00, 0xa2, 0x58, 0x5a, + 0xeb, 0xe7, 0x1c, 0xe7, 0x53, 0x7b, 0xe8, 0x05, 0xc0, 0xae, 0x72, 0x86, 0x0f, 0x82, 0x1a, 0x93, + 0xa2, 0xca, 0x58, 0xa7, 0x62, 0x71, 0x20, 0x3c, 0x1f, 0x0a, 0x82, 0x5f, 0x3e, 0x27, 0xe0, 0xf6, + 0x10, 0x7c, 0x5a, 0xd0, 0x1d, 0xae, 0xd3, 0xc5, 0xc2, 0x1b, 0x1b, 0x5e, 0x20, 0xb8, 0x60, 0xf7, + 0x45, 0x9d, 0xd7, 0x65, 0xb4, 0xf3, 0x77, 0x38, 0x8e, 0x7d, 0xe8, 0x09, 0x41, 0xed, 0xa1, 0x0f, + 0x81, 0xc0, 0xff, 0xa3, 0x5a, 0xda, 0x8d, 0x43, 0x05, 0x6d, 0x6a, 0xbb, 0xda, 0x5e, 0xfd, 0xe0, + 0x27, 0x92, 0xaf, 0x65, 0x01, 0x27, 0xd1, 0xc8, 0x4d, 0x1d, 0x9c, 0xa4, 0xd1, 0x64, 0xd2, 0x25, + 0x7f, 0x0c, 0x2e, 0xc1, 0x16, 0x7d, 0x10, 0xd4, 0xc4, 0xd3, 0x59, 0xbb, 0x94, 0xcc, 0xda, 0x28, + 0xf7, 0x59, 0x0b, 0x2a, 0x3e, 0x47, 0x55, 0x1e, 0x81, 0xdd, 0x2c, 0x4b, 0x7a, 0x97, 0x7c, 0x62, + 0xe9, 0xe4, 0x7e, 0x79, 0xe7, 0x11, 0xd8, 0xe6, 0x17, 0x0a, 0x5f, 0x4d, 0x2d, 0x4b, 0xc2, 0xf0, + 0xbf, 0x68, 0x93, 0x0b, 0x2a, 0x62, 0xde, 0xac, 0x48, 0xec, 0xe1, 0xe3, 0xb0, 0x52, 0x6a, 0x7e, + 0xa5, 0xc0, 0x9b, 0x99, 0x6d, 0x29, 0x64, 0x67, 0xaa, 0xa1, 0xc6, 0x7d, 0xc9, 0xa9, 0xc7, 0x05, + 0xfe, 0xaf, 0x30, 0x2c, 0xf2, 0xb0, 0x61, 0xa5, 0x6a, 0x39, 0xaa, 0x1d, 0x95, 0xb2, 0x36, 0xf7, + 0x2c, 0x0d, 0xca, 0x42, 0x1b, 0x9e, 0x00, 0x9f, 0x37, 0xcb, 0xbb, 0x95, 0xbd, 0xfa, 0xc1, 0xfe, + 0xa3, 0x5a, 0x32, 0xbf, 0x54, 0xe4, 0x8d, 0xdf, 0x52, 0x86, 0x95, 0xa1, 0x3a, 0x17, 0xe8, 0x9b, + 0x42, 0xf3, 0x61, 0xcc, 0x6c, 0xc0, 0xa7, 0xa8, 0x11, 0x01, 0xe3, 0x1e, 0x17, 0x10, 0x88, 0x2c, + 0xe6, 0x8c, 0xfa, 0x20, 0xfb, 0xda, 0x36, 0x9b, 0xc9, 0xac, 0xdd, 0xf8, 0x73, 0xc5, 0xb9, 0xb5, + 0x52, 0xd5, 0x79, 0xb3, 0x62, 0x64, 0xe9, 0xba, 0xf0, 0x8f, 0xa8, 0x46, 0xa5, 0x07, 0x98, 0x42, + 0x2f, 0x46, 0xd0, 0x53, 0x7e, 0x6b, 0x11, 0x21, 0xd7, 0x2a, 0xcb, 0x53, 0xb7, 0xe5, 0x91, 0x6b, + 0x95, 0xd2, 0xa5, 0xb5, 0x4a, 0xdb, 0x52, 0xc8, 0xb4, 0x94, 0x20, 0x74, 0xb2, 0x2e, 0x2b, 0x77, + 0x4b, 0x39, 0x53, 0x7e, 0x6b, 0x11, 0xd1, 0xf9, 0x50, 0x59, 0x31, 0x3a, 0x79, 0x3f, 0x96, 0x7a, + 0x72, 0x64, 0x4f, 0xb5, 0x42, 0x4f, 0xce, 0xa2, 0x27, 0x07, 0xbf, 0xd2, 0x10, 0xa6, 0x0b, 0x44, + 0x7f, 0x7e, 0x7f, 0xb2, 0x25, 0xff, 0xfe, 0x84, 0x7b, 0x4b, 0x7a, 0x05, 0xda, 0x49, 0x20, 0xd8, + 0x95, 0xd9, 0x52, 0x55, 0xe0, 0x62, 0x80, 0xb5, 0xa2, 0x04, 0x7c, 0x89, 0xea, 0x99, 0xf7, 0x84, + 0xb1, 0x90, 0xa9, 0x97, 0xb4, 0xf7, 0x80, 0x8a, 0x64, 0xbc, 0xa9, 0x27, 0xb3, 0x76, 0xbd, 0x97, + 0x03, 0xde, 0xcf, 0xda, 0xf5, 0xa5, 0x73, 0x6b, 0x19, 0x9e, 0xe6, 0x72, 0x20, 0xcf, 0x55, 0x7d, + 0x4a, 0xae, 0x63, 0x58, 0x9f, 0x6b, 0x09, 0xde, 0x3a, 0x41, 0xdf, 0xae, 0x19, 0x11, 0xde, 0x41, + 0x95, 0x11, 0x5c, 0x65, 0x37, 0xd1, 0x4a, 0x3f, 0x71, 0x03, 0x6d, 0x4c, 0xe8, 0x38, 0xce, 0x6e, + 0xdc, 0xb6, 0x95, 0x19, 0xbf, 0x96, 0x8f, 0xb4, 0xce, 0x0b, 0x0d, 0x2d, 0xe7, 0xc0, 0xa7, 0xa8, + 0x9a, 0xfe, 0x93, 0xd5, 0xcb, 0xff, 0xe1, 0x61, 0x2f, 0xff, 0x2f, 0xcf, 0x87, 0xfc, 0x0f, 0x96, + 0x5a, 0x96, 0xa4, 0xe0, 0xef, 0xd1, 0x96, 0x0f, 0x9c, 0x53, 0x57, 0x65, 0x36, 0xbf, 0x56, 0x41, + 0x5b, 0xfd, 0xcc, 0x6d, 0xcd, 0xcf, 0x4d, 0x32, 0xbd, 0xd5, 0x4b, 0xd7, 0xb7, 0x7a, 0xe9, 0xe6, + 0x56, 0x2f, 0x3d, 0x4f, 0x74, 0x6d, 0x9a, 0xe8, 0xda, 0x75, 0xa2, 0x6b, 0x37, 0x89, 0xae, 0xbd, + 0x4d, 0x74, 0xed, 0xe5, 0x3b, 0xbd, 0xf4, 0x4f, 0x6d, 0x3e, 0xb8, 0x8f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x68, 0x82, 0x7b, 0x73, 0x9e, 0x07, 0x00, 0x00, +} diff --git 
a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto new file mode 100644 index 00000000..c9421cf0 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -0,0 +1,128 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1alpha1; + +import "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. 
+message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go new file mode 100644 index 00000000..7b81ee49 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VolumeAttachment{}, + &VolumeAttachmentList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go new file mode 100644 index 00000000..964bb5f7 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -0,0 +1,126 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. 
+type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // Placeholder for *VolumeSource to accommodate inline volumes in pods. +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000..faca8e93 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. 
+// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. 
the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000..19ae9485 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,188 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + if *in == nil { + *out = nil + } else { + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index 6137d7a4..8957a4cf 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index b31d6f12..f2c8ea96 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -146,6 +146,12 @@ func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } return i, nil } @@ -242,6 +248,10 @@ func (m *StorageClass) Size() (n int) { if m.AllowVolumeExpansion != nil { n += 2 } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } return n } @@ -293,6 +303,7 @@ func (this *StorageClass) String() string { `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, `}`, }, "") return s @@ -600,6 +611,36 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := bool(v != 0) m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -842,42 +883,44 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 589 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x4f, 0xd4, 0x40, - 0x18, 0xc6, 0xb7, 0x2c, 0xab, 0x30, 0x0b, 0x71, 0x53, 0x39, 0x34, 0x7b, 0xe8, 0x6e, 0x38, 0xf5, - 0xc2, 0x0c, 0x20, 0x1a, 0x62, 0xe2, 0xc1, 0x12, 0x0e, 0x26, 0x10, 0x36, 0x35, 0xf1, 0x60, 0x3c, - 0x38, 0x5b, 0x5e, 0xcb, 0xd8, 0x76, 0xa6, 0x99, 0x99, 0xae, 0xee, 0xcd, 0x8f, 0xe0, 0x37, 0xf2, - 0x64, 0xc2, 0x91, 0x23, 0xa7, 0x46, 0xea, 0xb7, 0xf0, 0x64, 0xfa, 0x47, 0x5a, 0x58, 0x88, 0xdc, - 0x3a, 0xef, 0xfb, 0xfc, 0x9e, 0xb7, 0xf3, 0xce, 0x83, 0x0e, 0xc2, 0x7d, 0x85, 0x99, 0x20, 0x61, - 0x3a, 0x05, 0xc9, 0x41, 0x83, 0x22, 0x33, 0xe0, 0xa7, 0x42, 0x92, 0xba, 0x41, 0x13, 0x46, 0x94, - 0x16, 0x92, 0x06, 0x40, 0x66, 0x3b, 0x53, 0xd0, 0x74, 0x87, 0x04, 0xc0, 0x41, 0x52, 0x0d, 0xa7, - 0x38, 0x91, 0x42, 0x0b, 0x73, 0x58, 0x69, 0x31, 0x4d, 0x18, 0xae, 0xb5, 0xb8, 0xd6, 0x0e, 0xb7, - 0x02, 0xa6, 0xcf, 0xd2, 0x29, 0xf6, 0x45, 0x4c, 0x02, 0x11, 0x08, 0x52, 0x22, 0xd3, 0xf4, 0x53, - 0x79, 0x2a, 0x0f, 0xe5, 0x57, 0x65, 0x35, 0xdc, 0x6c, 0x8d, 0xf5, 0x85, 0x2c, 0x66, 0xde, 0x1e, - 0x37, 0xdc, 0x6b, 0x34, 0x31, 0xf5, 0xcf, 0x18, 0x07, 0x39, 0x27, 0x49, 0x18, 0x14, 0x05, 0x45, - 0x62, 0xd0, 0xf4, 0x2e, 0x8a, 0xdc, 0x47, 0xc9, 0x94, 
0x6b, 0x16, 0xc3, 0x02, 0xf0, 0xe2, 0x7f, - 0x80, 0xf2, 0xcf, 0x20, 0xa6, 0x0b, 0xdc, 0xb3, 0xfb, 0xb8, 0x54, 0xb3, 0x88, 0x30, 0xae, 0x95, - 0x96, 0xb7, 0xa1, 0xcd, 0x9f, 0xcb, 0x68, 0xed, 0x6d, 0xb5, 0xba, 0x83, 0x88, 0x2a, 0x65, 0x7e, - 0x44, 0x2b, 0xc5, 0x4d, 0x4e, 0xa9, 0xa6, 0x96, 0x31, 0x36, 0x9c, 0xfe, 0xee, 0x36, 0x6e, 0xd6, - 0x7c, 0x6d, 0x8c, 0x93, 0x30, 0x28, 0x0a, 0x0a, 0x17, 0x6a, 0x3c, 0xdb, 0xc1, 0x27, 0xd3, 0xcf, - 0xe0, 0xeb, 0x63, 0xd0, 0xd4, 0x35, 0xcf, 0xb3, 0x51, 0x27, 0xcf, 0x46, 0xa8, 0xa9, 0x79, 0xd7, - 0xae, 0xe6, 0x73, 0xd4, 0x4f, 0xa4, 0x98, 0x31, 0xc5, 0x04, 0x07, 0x69, 0x2d, 0x8d, 0x0d, 0x67, - 0xd5, 0x7d, 0x5a, 0x23, 0xfd, 0x49, 0xd3, 0xf2, 0xda, 0x3a, 0x33, 0x42, 0x28, 0xa1, 0x92, 0xc6, - 0xa0, 0x41, 0x2a, 0xab, 0x3b, 0xee, 0x3a, 0xfd, 0xdd, 0x7d, 0x7c, 0x7f, 0x02, 0x70, 0xfb, 0x5a, - 0x78, 0x72, 0x8d, 0x1e, 0x72, 0x2d, 0xe7, 0xcd, 0x2f, 0x36, 0x0d, 0xaf, 0xe5, 0x6f, 0x86, 0x68, - 0x5d, 0x82, 0x1f, 0x51, 0x16, 0x4f, 0x44, 0xc4, 0xfc, 0xb9, 0xb5, 0x5c, 0xfe, 0xe6, 0x61, 0x9e, - 0x8d, 0xd6, 0xbd, 0x76, 0xe3, 0x4f, 0x36, 0xda, 0x5e, 0xcc, 0x0e, 0x9e, 0x80, 0x54, 0x4c, 0x69, - 0xe0, 0xfa, 0x9d, 0x88, 0xd2, 0x18, 0x6e, 0x30, 0xde, 0x4d, 0x6f, 0x73, 0x0f, 0xad, 0xc5, 0x22, - 0xe5, 0xfa, 0x24, 0xd1, 0x4c, 0x70, 0x65, 0xf5, 0xc6, 0x5d, 0x67, 0xd5, 0x1d, 0xe4, 0xd9, 0x68, - 0xed, 0xb8, 0x55, 0xf7, 0x6e, 0xa8, 0xcc, 0x23, 0xb4, 0x41, 0xa3, 0x48, 0x7c, 0xa9, 0x06, 0x1c, - 0x7e, 0x4d, 0x28, 0x2f, 0x56, 0x65, 0x3d, 0x1a, 0x1b, 0xce, 0x8a, 0x6b, 0xe5, 0xd9, 0x68, 0xe3, - 0xf5, 0x1d, 0x7d, 0xef, 0x4e, 0x6a, 0xf8, 0x0a, 0x3d, 0xb9, 0xb5, 0x23, 0x73, 0x80, 0xba, 0x21, - 0xcc, 0xcb, 0x14, 0xac, 0x7a, 0xc5, 0xa7, 0xb9, 0x81, 0x7a, 0x33, 0x1a, 0xa5, 0x50, 0x3d, 0x9a, - 0x57, 0x1d, 0x5e, 0x2e, 0xed, 0x1b, 0x9b, 0x3f, 0x0c, 0x34, 0x68, 0x2f, 0xfc, 0x88, 0x29, 0x6d, - 0x7e, 0x58, 0xc8, 0x12, 0x7e, 0x58, 0x96, 0x0a, 0xba, 0x4c, 0xd2, 0xa0, 0x7e, 0xa6, 0x95, 0x7f, - 0x95, 0x56, 0x8e, 0x8e, 0x51, 0x8f, 0x69, 0x88, 0x95, 0xb5, 0x54, 0x66, 0xc1, 0x79, 0x68, 0x16, - 0xdc, 0xf5, 0xda, 0xb4, 0xf7, 0xa6, 0xc0, 0xbd, 0xca, 0xc5, 0xdd, 0x3a, 0xbf, 0xb2, 0x3b, 0x17, - 0x57, 0x76, 0xe7, 0xf2, 0xca, 0xee, 0x7c, 0xcb, 0x6d, 0xe3, 0x3c, 0xb7, 0x8d, 0x8b, 0xdc, 0x36, - 0x2e, 0x73, 0xdb, 0xf8, 0x95, 0xdb, 0xc6, 0xf7, 0xdf, 0x76, 0xe7, 0xfd, 0xe3, 0xda, 0xf1, 0x6f, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xae, 0x44, 0x72, 0xc1, 0x04, 0x00, 0x00, + // 622 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xcd, 0x6e, 0xd3, 0x40, + 0x14, 0x85, 0xe3, 0x86, 0xd0, 0x76, 0xd2, 0x8a, 0xd4, 0x74, 0x61, 0x65, 0x61, 0x47, 0x5d, 0x45, + 0x48, 0x1d, 0xb7, 0xa5, 0xa0, 0x0a, 0x89, 0x05, 0xae, 0xba, 0x40, 0x6a, 0xd4, 0xc8, 0x48, 0x15, + 0x42, 0x2c, 0x98, 0x38, 0x17, 0x77, 0x88, 0x3d, 0x63, 0xcd, 0x8c, 0x03, 0xd9, 0xf1, 0x08, 0xbc, + 0x01, 0x8f, 0xc2, 0xb6, 0xcb, 0x2e, 0xbb, 0xb2, 0xa8, 0x79, 0x8b, 0xae, 0x90, 0x7f, 0x68, 0xdc, + 0xfc, 0x88, 0xee, 0x3c, 0xf7, 0x9e, 0xef, 0xdc, 0x99, 0xeb, 0x83, 0x8e, 0x47, 0x47, 0x12, 0x53, + 0x6e, 0x8f, 0xe2, 0x01, 0x08, 0x06, 0x0a, 0xa4, 0x3d, 0x06, 0x36, 0xe4, 0xc2, 0x2e, 0x1b, 0x24, + 0xa2, 0xb6, 0x54, 0x5c, 0x10, 0x1f, 0xec, 0xf1, 0xfe, 0x00, 0x14, 0xd9, 0xb7, 0x7d, 0x60, 0x20, + 0x88, 0x82, 0x21, 0x8e, 0x04, 0x57, 0x5c, 0x6f, 0x17, 0x5a, 0x4c, 0x22, 0x8a, 0x4b, 0x2d, 0x2e, + 0xb5, 0xed, 0x5d, 0x9f, 0xaa, 0x8b, 0x78, 0x80, 0x3d, 0x1e, 0xda, 0x3e, 0xf7, 0xb9, 0x9d, 0x23, + 0x83, 0xf8, 0x73, 0x7e, 0xca, 0x0f, 0xf9, 0x57, 0x61, 0xd5, 0xde, 0xa9, 0x8c, 0xf5, 0xb8, 0xc8, + 0x66, 0xce, 0x8e, 0x6b, 0x1f, 0x4e, 0x35, 0x21, 
0xf1, 0x2e, 0x28, 0x03, 0x31, 0xb1, 0xa3, 0x91, + 0x9f, 0x15, 0xa4, 0x1d, 0x82, 0x22, 0x8b, 0x28, 0x7b, 0x19, 0x25, 0x62, 0xa6, 0x68, 0x08, 0x73, + 0xc0, 0xcb, 0xff, 0x01, 0xd2, 0xbb, 0x80, 0x90, 0xcc, 0x71, 0xcf, 0x97, 0x71, 0xb1, 0xa2, 0x81, + 0x4d, 0x99, 0x92, 0x4a, 0xcc, 0x42, 0x3b, 0x3f, 0x1b, 0x68, 0xe3, 0x5d, 0xb1, 0xba, 0xe3, 0x80, + 0x48, 0xa9, 0x7f, 0x42, 0x6b, 0xd9, 0x4b, 0x86, 0x44, 0x11, 0x43, 0xeb, 0x68, 0xdd, 0xe6, 0xc1, + 0x1e, 0x9e, 0xae, 0xf9, 0xce, 0x18, 0x47, 0x23, 0x3f, 0x2b, 0x48, 0x9c, 0xa9, 0xf1, 0x78, 0x1f, + 0x9f, 0x0d, 0xbe, 0x80, 0xa7, 0x7a, 0xa0, 0x88, 0xa3, 0x5f, 0x26, 0x56, 0x2d, 0x4d, 0x2c, 0x34, + 0xad, 0xb9, 0x77, 0xae, 0xfa, 0x0b, 0xd4, 0x8c, 0x04, 0x1f, 0x53, 0x49, 0x39, 0x03, 0x61, 0xac, + 0x74, 0xb4, 0xee, 0xba, 0xf3, 0xb4, 0x44, 0x9a, 0xfd, 0x69, 0xcb, 0xad, 0xea, 0xf4, 0x00, 0xa1, + 0x88, 0x08, 0x12, 0x82, 0x02, 0x21, 0x8d, 0x7a, 0xa7, 0xde, 0x6d, 0x1e, 0x1c, 0xe1, 0xe5, 0x09, + 0xc0, 0xd5, 0x67, 0xe1, 0xfe, 0x1d, 0x7a, 0xc2, 0x94, 0x98, 0x4c, 0xaf, 0x38, 0x6d, 0xb8, 0x15, + 0x7f, 0x7d, 0x84, 0x36, 0x05, 0x78, 0x01, 0xa1, 0x61, 0x9f, 0x07, 0xd4, 0x9b, 0x18, 0x8f, 0xf2, + 0x6b, 0x9e, 0xa4, 0x89, 0xb5, 0xe9, 0x56, 0x1b, 0xb7, 0x89, 0xb5, 0x37, 0x9f, 0x1d, 0xdc, 0x07, + 0x21, 0xa9, 0x54, 0xc0, 0xd4, 0x39, 0x0f, 0xe2, 0x10, 0xee, 0x31, 0xee, 0x7d, 0x6f, 0xfd, 0x10, + 0x6d, 0x84, 0x3c, 0x66, 0xea, 0x2c, 0x52, 0x94, 0x33, 0x69, 0x34, 0x3a, 0xf5, 0xee, 0xba, 0xd3, + 0x4a, 0x13, 0x6b, 0xa3, 0x57, 0xa9, 0xbb, 0xf7, 0x54, 0xfa, 0x29, 0xda, 0x26, 0x41, 0xc0, 0xbf, + 0x16, 0x03, 0x4e, 0xbe, 0x45, 0x84, 0x65, 0xab, 0x32, 0x1e, 0x77, 0xb4, 0xee, 0x9a, 0x63, 0xa4, + 0x89, 0xb5, 0xfd, 0x66, 0x41, 0xdf, 0x5d, 0x48, 0xe9, 0xef, 0xd1, 0xd6, 0x38, 0x2f, 0x39, 0x94, + 0x0d, 0x29, 0xf3, 0x7b, 0x7c, 0x08, 0xc6, 0x6a, 0xfe, 0xe8, 0x67, 0x69, 0x62, 0x6d, 0x9d, 0xcf, + 0x36, 0x6f, 0x17, 0x15, 0xdd, 0x79, 0x93, 0xf6, 0x6b, 0xf4, 0x64, 0x66, 0xfb, 0x7a, 0x0b, 0xd5, + 0x47, 0x30, 0xc9, 0xf3, 0xb5, 0xee, 0x66, 0x9f, 0xfa, 0x36, 0x6a, 0x8c, 0x49, 0x10, 0x43, 0x11, + 0x07, 0xb7, 0x38, 0xbc, 0x5a, 0x39, 0xd2, 0x76, 0x7e, 0x69, 0xa8, 0x55, 0xfd, 0x95, 0xa7, 0x54, + 0x2a, 0xfd, 0xe3, 0x5c, 0x4a, 0xf1, 0xc3, 0x52, 0x9a, 0xd1, 0x79, 0x46, 0x5b, 0x65, 0x00, 0xd6, + 0xfe, 0x55, 0x2a, 0x09, 0xed, 0xa1, 0x06, 0x55, 0x10, 0x4a, 0x63, 0x25, 0x4f, 0x59, 0xf7, 0xa1, + 0x29, 0x73, 0x36, 0x4b, 0xd3, 0xc6, 0xdb, 0x0c, 0x77, 0x0b, 0x17, 0x67, 0xf7, 0xf2, 0xc6, 0xac, + 0x5d, 0xdd, 0x98, 0xb5, 0xeb, 0x1b, 0xb3, 0xf6, 0x3d, 0x35, 0xb5, 0xcb, 0xd4, 0xd4, 0xae, 0x52, + 0x53, 0xbb, 0x4e, 0x4d, 0xed, 0x77, 0x6a, 0x6a, 0x3f, 0xfe, 0x98, 0xb5, 0x0f, 0xab, 0xa5, 0xe3, + 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, 0xe2, 0x8e, 0x84, 0x1b, 0x05, 0x00, 0x00, } diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 24420f5e..b0e030c0 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1beta1"; // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { @@ -63,6 +63,13 @@ message StorageClass { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. 
When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; } // StorageClassList is a collection of storage classes. @@ -75,3 +82,4 @@ message StorageClassList { // Items is the list of StorageClasses repeated StorageClass items = 2; } + diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index 759ba81a..7f1f0c8e 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -41,7 +41,7 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index e5036b55..7fb9ad98 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -59,6 +59,13 @@ type StorageClass struct { // AllowVolumeExpansion shows whether the storage class allow volume expand // +optional AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is alpha-level and is only honored by servers that enable + // the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -74,3 +81,18 @@ type StorageClassList struct { // Items is the list of StorageClasses Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` } + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index e2148c23..85886f7d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -35,6 +35,7 @@ var map_StorageClass = map[string]string{ "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is alpha-level and is only honored by servers that enable the VolumeScheduling feature.", } func (StorageClass) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index bf1f91a6..9d1e7982 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -22,32 +22,9 @@ package v1beta1 import ( v1 "k8s.io/api/core/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClass).DeepCopyInto(out.(*StorageClass)) - return nil - }, InType: reflect.TypeOf(&StorageClass{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageClassList).DeepCopyInto(out.(*StorageClassList)) - return nil - }, InType: reflect.TypeOf(&StorageClassList{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -83,6 +60,15 @@ func (in *StorageClass) DeepCopyInto(out *StorageClass) { **out = **in } } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + if *in == nil { + *out = nil + } else { + *out = new(VolumeBindingMode) + **out = **in + } + } return } diff --git a/vendor/k8s.io/apiextensions-apiserver/README.md b/vendor/k8s.io/apiextensions-apiserver/README.md index c9c84bae..be75b9ba 100644 --- a/vendor/k8s.io/apiextensions-apiserver/README.md +++ b/vendor/k8s.io/apiextensions-apiserver/README.md @@ -6,7 +6,7 @@ It provides an API for registering `CustomResourceDefinitions`. ## Purpose -This API server provides the implementation for `CustomResourceDefinitions` which is included as +This API server provides the implementation for `CustomResourceDefinitions` which is included as delegate server inside of `kube-apiserver`. diff --git a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go index fa78ce6a..80b37bf7 100644 --- a/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go +++ b/vendor/k8s.io/apiextensions-apiserver/pkg/features/kube_features.go @@ -29,6 +29,7 @@ const ( // owner: @sttts, @nikhita // alpha: v1.8 + // beta: v1.9 // // CustomResourceValidation is a list of validation methods for CustomResources CustomResourceValidation utilfeature.Feature = "CustomResourceValidation" @@ -42,5 +43,5 @@ func init() { // To add a new feature, define a key for it above and add it here. 
The features will be // available throughout Kubernetes binaries. var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureSpec{ - CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha}, + CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index d5503fac..9960600b 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -405,84 +405,84 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr // IsNotFound returns true if the specified error was created by NewNotFound. func IsNotFound(err error) bool { - return reasonForError(err) == metav1.StatusReasonNotFound + return ReasonForError(err) == metav1.StatusReasonNotFound } // IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. func IsAlreadyExists(err error) bool { - return reasonForError(err) == metav1.StatusReasonAlreadyExists + return ReasonForError(err) == metav1.StatusReasonAlreadyExists } // IsConflict determines if the err is an error which indicates the provided update conflicts. func IsConflict(err error) bool { - return reasonForError(err) == metav1.StatusReasonConflict + return ReasonForError(err) == metav1.StatusReasonConflict } // IsInvalid determines if the err is an error which indicates the provided resource is not valid. func IsInvalid(err error) bool { - return reasonForError(err) == metav1.StatusReasonInvalid + return ReasonForError(err) == metav1.StatusReasonInvalid } // IsGone is true if the error indicates the requested resource is no longer available. func IsGone(err error) bool { - return reasonForError(err) == metav1.StatusReasonGone + return ReasonForError(err) == metav1.StatusReasonGone } // IsResourceExpired is true if the error indicates the resource has expired and the current action is // no longer possible. func IsResourceExpired(err error) bool { - return reasonForError(err) == metav1.StatusReasonExpired + return ReasonForError(err) == metav1.StatusReasonExpired } // IsMethodNotSupported determines if the err is an error which indicates the provided action could not // be performed because it is not supported by the server. func IsMethodNotSupported(err error) bool { - return reasonForError(err) == metav1.StatusReasonMethodNotAllowed + return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed } // IsServiceUnavailable is true if the error indicates the underlying service is no longer available. func IsServiceUnavailable(err error) bool { - return reasonForError(err) == metav1.StatusReasonServiceUnavailable + return ReasonForError(err) == metav1.StatusReasonServiceUnavailable } // IsBadRequest determines if err is an error which indicates that the request is invalid. func IsBadRequest(err error) bool { - return reasonForError(err) == metav1.StatusReasonBadRequest + return ReasonForError(err) == metav1.StatusReasonBadRequest } // IsUnauthorized determines if err is an error which indicates that the request is unauthorized and // requires authentication by the user. func IsUnauthorized(err error) bool { - return reasonForError(err) == metav1.StatusReasonUnauthorized + return ReasonForError(err) == metav1.StatusReasonUnauthorized } // IsForbidden determines if err is an error which indicates that the request is forbidden and cannot // be completed as requested. 
func IsForbidden(err error) bool { - return reasonForError(err) == metav1.StatusReasonForbidden + return ReasonForError(err) == metav1.StatusReasonForbidden } // IsTimeout determines if err is an error which indicates that request times out due to long // processing. func IsTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonTimeout + return ReasonForError(err) == metav1.StatusReasonTimeout } // IsServerTimeout determines if err is an error which indicates that the request needs to be retried // by the client. func IsServerTimeout(err error) bool { - return reasonForError(err) == metav1.StatusReasonServerTimeout + return ReasonForError(err) == metav1.StatusReasonServerTimeout } // IsInternalError determines if err is an error which indicates an internal server error. func IsInternalError(err error) bool { - return reasonForError(err) == metav1.StatusReasonInternalError + return ReasonForError(err) == metav1.StatusReasonInternalError } // IsTooManyRequests determines if err is an error which indicates that there are too many requests // that the server cannot handle. func IsTooManyRequests(err error) bool { - if reasonForError(err) == metav1.StatusReasonTooManyRequests { + if ReasonForError(err) == metav1.StatusReasonTooManyRequests { return true } switch t := err.(type) { @@ -536,7 +536,8 @@ func SuggestsClientDelay(err error) (int, bool) { return 0, false } -func reasonForError(err error) metav1.StatusReason { +// ReasonForError returns the HTTP status for a particular error. +func ReasonForError(err error) metav1.StatusReason { switch t := err.(type) { case APIStatus: return t.Status().Reason diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go index bf172369..5dc9d89e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -75,6 +75,9 @@ type MetadataAccessor interface { Annotations(obj runtime.Object) (map[string]string, error) SetAnnotations(obj runtime.Object, annotations map[string]string) error + Continue(obj runtime.Object) (string, error) + SetContinue(obj runtime.Object, c string) error + runtime.ResourceVersioner } diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go new file mode 100644 index 00000000..7f92f39a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -0,0 +1,121 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// lazyObject defers loading the mapper and typer until necessary. 
+type lazyObject struct { + loader func() (RESTMapper, runtime.ObjectTyper, error) + + lock sync.Mutex + loaded bool + err error + mapper RESTMapper + typer runtime.ObjectTyper +} + +// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// returning those initialization errors when the interface methods are invoked. This defers the +// initialization and any server calls until a client actually needs to perform the action. +func NewLazyObjectLoader(fn func() (RESTMapper, runtime.ObjectTyper, error)) (RESTMapper, runtime.ObjectTyper) { + obj := &lazyObject{loader: fn} + return obj, obj +} + +// init lazily loads the mapper and typer, returning an error if initialization has failed. +func (o *lazyObject) init() error { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded { + return o.err + } + o.mapper, o.typer, o.err = o.loader() + o.loaded = true + return o.err +} + +var _ RESTMapper = &lazyObject{} +var _ runtime.ObjectTyper = &lazyObject{} + +func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return schema.GroupVersionKind{}, err + } + return o.mapper.KindFor(resource) +} + +func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionKind{}, err + } + return o.mapper.KindsFor(resource) +} + +func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return schema.GroupVersionResource{}, err + } + return o.mapper.ResourceFor(input) +} + +func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionResource{}, err + } + return o.mapper.ResourcesFor(input) +} + +func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMapping(gk, versions...) +} + +func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMappings(gk, versions...) 
+} + +func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { + if err := o.init(); err != nil { + return "", err + } + return o.mapper.ResourceSingularizer(resource) +} + +func (o *lazyObject) ObjectKinds(obj runtime.Object) ([]schema.GroupVersionKind, bool, error) { + if err := o.init(); err != nil { + return nil, false, err + } + return o.typer.ObjectKinds(obj) +} + +func (o *lazyObject) Recognizes(gvk schema.GroupVersionKind) bool { + if err := o.init(); err != nil { + return false + } + return o.typer.Recognizes(gvk) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index 1889b951..c2d51b43 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -89,7 +89,7 @@ func ListAccessor(obj interface{}) (List, error) { } return nil, errNotList default: - panic(fmt.Errorf("%T does not implement the List interface", obj)) + return nil, errNotList } } @@ -367,6 +367,23 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e return nil } +func (resourceAccessor) Continue(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetContinue(), nil +} + +func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetContinue(version) + return nil +} + // extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go index 3ebf2481..4e13efea 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/unstructured.go @@ -21,8 +21,24 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) +// InterfacesForUnstructuredConversion returns VersionInterfaces suitable for +// dealing with unstructured.Unstructured objects and supports conversion +// from typed objects (provided by parent) to untyped objects. +func InterfacesForUnstructuredConversion(parent VersionInterfacesFunc) VersionInterfacesFunc { + return func(version schema.GroupVersion) (*VersionInterfaces, error) { + if i, err := parent(version); err == nil { + return &VersionInterfaces{ + ObjectConvertor: i.ObjectConvertor, + MetadataAccessor: NewAccessor(), + }, nil + } + return InterfacesForUnstructured(version) + } +} + // InterfacesForUnstructured returns VersionInterfaces suitable for -// dealing with unstructured.Unstructured objects. +// dealing with unstructured.Unstructured objects. It will return errors for +// other conversions. func InterfacesForUnstructured(schema.GroupVersion) (*VersionInterfaces, error) { return &VersionInterfaces{ ObjectConvertor: &unstructured.UnstructuredObjectConverter{}, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go index 118dfca0..eb49f819 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. 
package resource -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Quantity).DeepCopyInto(out.(*Quantity)) - return nil - }, InType: reflect.TypeOf(&Quantity{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Quantity) DeepCopyInto(out *Quantity) { *out = in.DeepCopy() diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go index 5150db01..0da94f50 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/registered/registered.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Package to keep track of API Versions that can be registered and are enabled in api.Scheme. +// Package to keep track of API Versions that can be registered and are enabled in a Scheme. package registered import ( diff --git a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go index 213e34bc..baca784f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apimachinery/types.go @@ -38,7 +38,7 @@ type GroupMeta struct { // to go through the InterfacesFor method below. SelfLinker runtime.SelfLinker - // RESTMapper provides the default mapping between REST paths and the objects declared in api.Scheme and all known + // RESTMapper provides the default mapping between REST paths and the objects declared in a Scheme and all known // versions. RESTMapper meta.RESTMapper diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go index f13ff9fc..deaf5309 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/internalversion/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package internalversion import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *List) DeepCopyInto(out *List) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index a96f38ee..c62f8533 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -38,6 +38,7 @@ func AddConversionFuncs(scheme *runtime.Scheme) error { Convert_intstr_IntOrString_To_intstr_IntOrString, Convert_unversioned_Time_To_unversioned_Time, + Convert_unversioned_MicroTime_To_unversioned_MicroTime, Convert_Pointer_v1_Duration_To_v1_Duration, Convert_v1_Duration_To_Pointer_v1_Duration, @@ -199,6 +200,12 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s return nil } +func Convert_unversioned_MicroTime_To_unversioned_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + // Convert_Slice_string_To_unversioned_Time allows converting a URL query parameter value func Convert_Slice_string_To_unversioned_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 653b3023..b8218469 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -50,6 +50,7 @@ limitations under the License. MicroTime ObjectMeta OwnerReference + Patch Preconditions RootPaths ServerAddressByClientCIDR @@ -196,51 +197,55 @@ func (m *OwnerReference) Reset() { *m = OwnerReference{} } func (*OwnerReference) ProtoMessage() {} func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } + func (m *Preconditions) Reset() { *m = Preconditions{} } func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } func (m *RootPaths) Reset() { *m = RootPaths{} } func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } +func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} + return fileDescriptorGenerated, []int{28} } func (m *Status) Reset() { *m = Status{} } func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatusCause) Reset() { *m = StatusCause{} } func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } func (m *StatusDetails) Reset() { *m = StatusDetails{} } func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{30} } +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } func (m *Time) Reset() { *m = Time{} } func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } func (m *Timestamp) Reset() { *m = Timestamp{} } func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *TypeMeta) Reset() { *m = TypeMeta{} } func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } func (m *Verbs) Reset() { *m = Verbs{} } func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } func (m *WatchEvent) Reset() { *m = WatchEvent{} } func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -268,6 +273,7 @@ func init() { proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") @@ -1317,6 +1323,24 @@ func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2065,6 +2089,12 @@ func (m *OwnerReference) Size() (n int) { return n } +func (m *Patch) Size() (n int) { + var l int + _ = l + return n +} + func (m *Preconditions) Size() (n int) { var l int _ = l @@ -2464,6 +2494,15 @@ func (this *OwnerReference) String() string { }, "") return s } +func (this *Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Patch{`, + `}`, + }, "") + return s +} func (this *Preconditions) String() string { if this == nil { return "nil" @@ -6382,6 +6421,56 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } return nil } +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Preconditions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -7715,7 +7804,7 @@ func init() { } var fileDescriptorGenerated = []byte{ - // 2428 bytes of a gzipped FileDescriptorProto + // 2435 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, 0x15, 0x4e, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa9, 0xcd, 0x80, 0x37, 0x02, 0x3b, 0xdb, 0x8b, 0x56, 0x59, 0x98, 0xb5, 0x49, 0x16, 0x56, 0xc3, 0x00, 0x03, 0xe9, 0x38, 0x33, 0x8a, 0x76, 0x32, @@ -7723,7 +7812,7 @@ var fileDescriptorGenerated = []byte{ 0x33, 0x09, 0x1c, 0xd8, 0x03, 0x48, 0x1c, 0x10, 0x9a, 0x23, 0x27, 0xb4, 0x23, 0xb8, 0x70, 0xe5, 0xc4, 0x05, 0x4e, 0x48, 0xcc, 0x71, 0x24, 0x2e, 0x7b, 0x40, 0xd6, 0x8e, 0xf7, 0xc0, 0x09, 0x71, 0xcf, 0x09, 0x55, 0x75, 0xf5, 0x9f, 0x1d, 0x4f, 0xda, 0x3b, 0x0b, 0xe2, 0x14, 0xf7, 0xfb, 0xf9, - 0xde, 0xab, 0xaa, 0xf7, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, + 0xde, 0xab, 0x57, 0xaf, 0x5e, 0xbd, 0x7a, 0x81, 0xbd, 0xe3, 0x6b, 0xac, 0x6e, 0x7b, 0x8d, 0xe3, 0xfe, 0x01, 0xa1, 0x2e, 0xe1, 0x84, 0x35, 0x4e, 0x88, 0xdb, 0xf6, 0x68, 0x43, 0x31, 0xcc, 0x9e, 0xdd, 0x35, 0xad, 0x23, 0xdb, 0x25, 0xf4, 0xac, 0xd1, 0x3b, 0xee, 0x08, 0x02, 0x6b, 0x74, 0x09, 0x37, 0x1b, 0x27, 0x1b, 0x8d, 0x0e, 0x71, 0x09, 0x35, 0x39, 0x69, 0xd7, 0x7b, 0xd4, 0xe3, 0x1e, @@ -7735,137 +7824,138 @@ var fileDescriptorGenerated = []byte{ 0xff, 0x96, 0x85, 0xc2, 0x56, 0x6b, 0xf7, 0x16, 0xf5, 0xfa, 0x3d, 0xb4, 0x06, 0xb3, 0xae, 0xd9, 0x25, 0x15, 0x6d, 0x4d, 0x5b, 0x2f, 0x1a, 0xf3, 0x4f, 0x07, 0xb5, 0x99, 0xe1, 0xa0, 0x36, 0x7b, 0xc7, 0xec, 0x12, 0x2c, 0x39, 0xc8, 0x81, 0xc2, 0x09, 0xa1, 0xcc, 0xf6, 0x5c, 0x56, 0xc9, 0xac, - 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x36, 0xad, 0x2e, 0x0d, 0xdc, 0xf7, 0x55, 0x6f, 0x7a, - 0xb4, 0x69, 0x33, 0xcb, 0x3b, 0x21, 0xf4, 0xcc, 0x58, 0x52, 0x56, 0x0a, 0x8a, 0xc9, 0x70, 0x68, - 0x01, 0xfd, 0x5c, 0x83, 0xa5, 0x1e, 0x25, 0x87, 0x84, 0x52, 0xd2, 0x56, 0xfc, 0x4a, 0x76, 0x4d, - 0xfb, 0x0c, 0xcc, 0x56, 0x94, 0xd9, 0xa5, 0xd6, 0x08, 0x3e, 0x1e, 0xb3, 0x88, 0x7e, 0xa7, 0xc1, - 0x2a, 0x23, 0xf4, 0x84, 0xd0, 0xad, 0x76, 0x9b, 0x12, 0xc6, 0x8c, 0xb3, 0x6d, 0xc7, 0x26, 0x2e, - 0xdf, 0xde, 0x6d, 0x62, 0x56, 0x99, 0x95, 0xfb, 0xf0, 0x9d, 0x74, 0x0e, 0xed, 0x4f, 0xc2, 0x31, - 0x74, 0xe5, 0xd1, 0xea, 0x44, 0x11, 0x86, 0x5f, 0xe0, 0x86, 0x7e, 0x08, 0xf3, 0xc1, 0x41, 0xde, - 0xb6, 0x19, 0x47, 0xf7, 0x21, 0xdf, 0x11, 0x1f, 0xac, 0xa2, 0x49, 0x07, 0xeb, 0xe9, 0x1c, 0x0c, - 0x30, 0x8c, 0x05, 0xe5, 0x4f, 0x5e, 0x7e, 0x32, 0xac, 0xd0, 0xf4, 0x3f, 0x67, 0xa1, 0xb4, 0xd5, - 0xda, 0xc5, 0x84, 0x79, 0x7d, 0x6a, 0x91, 0x14, 0x41, 0xb3, 0x09, 0x20, 0xfe, 0xb2, 0x9e, 0x69, - 0x91, 
0x76, 0x25, 0xb3, 0xa6, 0xad, 0x17, 0x0c, 0xa4, 0xe4, 0xe0, 0x4e, 0xc8, 0xc1, 0x31, 0x29, - 0x81, 0x7a, 0x6c, 0xbb, 0x6d, 0x79, 0xda, 0x31, 0xd4, 0x77, 0x6d, 0xb7, 0x8d, 0x25, 0x07, 0xdd, - 0x86, 0xdc, 0x09, 0xa1, 0x07, 0x62, 0xff, 0x45, 0x40, 0x7c, 0x25, 0xdd, 0xf2, 0xee, 0x0b, 0x15, - 0xa3, 0x38, 0x1c, 0xd4, 0x72, 0xf2, 0x27, 0xf6, 0x41, 0x50, 0x1d, 0x80, 0x1d, 0x79, 0x94, 0x4b, - 0x77, 0x2a, 0xb9, 0xb5, 0xec, 0x7a, 0xd1, 0x58, 0x10, 0xfe, 0xed, 0x87, 0x54, 0x1c, 0x93, 0x40, - 0xd7, 0x60, 0x9e, 0xd9, 0x6e, 0xa7, 0xef, 0x98, 0x54, 0x10, 0x2a, 0x79, 0xe9, 0xe7, 0x8a, 0xf2, - 0x73, 0x7e, 0x3f, 0xc6, 0xc3, 0x09, 0x49, 0x61, 0xc9, 0x32, 0x39, 0xe9, 0x78, 0xd4, 0x26, 0xac, - 0x32, 0x17, 0x59, 0xda, 0x0e, 0xa9, 0x38, 0x26, 0x81, 0x5e, 0x87, 0x9c, 0xdc, 0xf9, 0x4a, 0x41, - 0x9a, 0x28, 0x2b, 0x13, 0x39, 0x79, 0x2c, 0xd8, 0xe7, 0xa1, 0x37, 0x61, 0x4e, 0x65, 0x4d, 0xa5, - 0x28, 0xc5, 0x16, 0x95, 0xd8, 0x5c, 0x10, 0xd6, 0x01, 0x5f, 0xff, 0xa3, 0x06, 0x8b, 0xb1, 0xf3, - 0x93, 0xb1, 0x72, 0x0d, 0xe6, 0x3b, 0xb1, 0x4c, 0x51, 0x67, 0x19, 0xae, 0x26, 0x9e, 0x45, 0x38, - 0x21, 0x89, 0x08, 0x14, 0xa9, 0x42, 0x0a, 0x2a, 0xc2, 0x46, 0xea, 0x40, 0x0b, 0x7c, 0x88, 0x2c, - 0xc5, 0x88, 0x0c, 0x47, 0xc8, 0xfa, 0x3f, 0x35, 0x19, 0x74, 0x41, 0x8d, 0x40, 0xeb, 0xb1, 0x3a, - 0xa4, 0xc9, 0x2d, 0x9c, 0x9f, 0x50, 0x43, 0x2e, 0x49, 0xde, 0xcc, 0xff, 0x45, 0xf2, 0x5e, 0x2f, + 0x65, 0xd7, 0x4b, 0x9b, 0x37, 0xea, 0x69, 0x82, 0x56, 0x97, 0x06, 0xee, 0xfb, 0xaa, 0x37, 0x3d, + 0xda, 0xb4, 0x99, 0xe5, 0x9d, 0x10, 0x7a, 0x66, 0x2c, 0x29, 0x2b, 0x05, 0xc5, 0x64, 0x38, 0xb4, + 0x80, 0x7e, 0xae, 0xc1, 0x52, 0x8f, 0x92, 0x43, 0x42, 0x29, 0x69, 0x2b, 0x7e, 0x25, 0xbb, 0xa6, + 0x7d, 0x06, 0x66, 0x2b, 0xca, 0xec, 0x52, 0x6b, 0x04, 0x1f, 0x8f, 0x59, 0x44, 0xbf, 0xd3, 0x60, + 0x95, 0x11, 0x7a, 0x42, 0xe8, 0x56, 0xbb, 0x4d, 0x09, 0x63, 0xc6, 0xd9, 0xb6, 0x63, 0x13, 0x97, + 0x6f, 0xef, 0x36, 0x31, 0xab, 0xcc, 0xca, 0x38, 0x7c, 0x27, 0x9d, 0x43, 0xfb, 0x93, 0x70, 0x0c, + 0x5d, 0x79, 0xb4, 0x3a, 0x51, 0x84, 0xe1, 0x17, 0xb8, 0xa1, 0x1f, 0xc2, 0x7c, 0xb0, 0x91, 0xb7, + 0x6d, 0xc6, 0xd1, 0x7d, 0xc8, 0x77, 0xc4, 0x07, 0xab, 0x68, 0xd2, 0xc1, 0x7a, 0x3a, 0x07, 0x03, + 0x0c, 0x63, 0x41, 0xf9, 0x93, 0x97, 0x9f, 0x0c, 0x2b, 0x34, 0xfd, 0xcf, 0x59, 0x28, 0x6d, 0xb5, + 0x76, 0x31, 0x61, 0x5e, 0x9f, 0x5a, 0x24, 0x45, 0xd2, 0x6c, 0x02, 0x88, 0xbf, 0xac, 0x67, 0x5a, + 0xa4, 0x5d, 0xc9, 0xac, 0x69, 0xeb, 0x05, 0x03, 0x29, 0x39, 0xb8, 0x13, 0x72, 0x70, 0x4c, 0x4a, + 0xa0, 0x1e, 0xdb, 0x6e, 0x5b, 0xee, 0x76, 0x0c, 0xf5, 0x5d, 0xdb, 0x6d, 0x63, 0xc9, 0x41, 0xb7, + 0x21, 0x77, 0x42, 0xe8, 0x81, 0x88, 0xbf, 0x48, 0x88, 0xaf, 0xa4, 0x5b, 0xde, 0x7d, 0xa1, 0x62, + 0x14, 0x87, 0x83, 0x5a, 0x4e, 0xfe, 0xc4, 0x3e, 0x08, 0xaa, 0x03, 0xb0, 0x23, 0x8f, 0x72, 0xe9, + 0x4e, 0x25, 0xb7, 0x96, 0x5d, 0x2f, 0x1a, 0x0b, 0xc2, 0xbf, 0xfd, 0x90, 0x8a, 0x63, 0x12, 0xe8, + 0x1a, 0xcc, 0x33, 0xdb, 0xed, 0xf4, 0x1d, 0x93, 0x0a, 0x42, 0x25, 0x2f, 0xfd, 0x5c, 0x51, 0x7e, + 0xce, 0xef, 0xc7, 0x78, 0x38, 0x21, 0x29, 0x2c, 0x59, 0x26, 0x27, 0x1d, 0x8f, 0xda, 0x84, 0x55, + 0xe6, 0x22, 0x4b, 0xdb, 0x21, 0x15, 0xc7, 0x24, 0xd0, 0xeb, 0x90, 0x93, 0x91, 0xaf, 0x14, 0xa4, + 0x89, 0xb2, 0x32, 0x91, 0x93, 0xdb, 0x82, 0x7d, 0x1e, 0x7a, 0x13, 0xe6, 0xd4, 0xa9, 0xa9, 0x14, + 0xa5, 0xd8, 0xa2, 0x12, 0x9b, 0x0b, 0xd2, 0x3a, 0xe0, 0xeb, 0x7f, 0xd4, 0x60, 0x31, 0xb6, 0x7f, + 0x32, 0x57, 0xae, 0xc1, 0x7c, 0x27, 0x76, 0x52, 0xd4, 0x5e, 0x86, 0xab, 0x89, 0x9f, 0x22, 0x9c, + 0x90, 0x44, 0x04, 0x8a, 0x54, 0x21, 0x05, 0x15, 0x61, 0x23, 0x75, 0xa2, 0x05, 0x3e, 0x44, 0x96, + 0x62, 0x44, 0x86, 0x23, 0x64, 
0xfd, 0x9f, 0x9a, 0x4c, 0xba, 0xa0, 0x46, 0xa0, 0xf5, 0x58, 0x1d, + 0xd2, 0x64, 0x08, 0xe7, 0x27, 0xd4, 0x90, 0x4b, 0x0e, 0x6f, 0xe6, 0xff, 0xe2, 0xf0, 0x5e, 0x2f, 0xfc, 0xe6, 0xc3, 0xda, 0xcc, 0x07, 0xff, 0x58, 0x9b, 0xd1, 0x3f, 0xc9, 0x40, 0xb9, 0x49, 0x1c, 0xc2, 0xc9, 0xdd, 0x1e, 0x97, 0x2b, 0xb8, 0x09, 0xa8, 0x43, 0x4d, 0x8b, 0xb4, 0x08, 0xb5, 0xbd, - 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x11, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, - 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0x22, 0x71, - 0xde, 0x4e, 0xb7, 0xf6, 0x56, 0x5c, 0xd5, 0x58, 0x1e, 0x0e, 0x6a, 0xe5, 0x04, 0x09, 0x27, 0xc1, - 0xd1, 0x77, 0x61, 0xc9, 0xa3, 0xbd, 0x23, 0xd3, 0x6d, 0x92, 0x1e, 0x71, 0xdb, 0xc4, 0xe5, 0x4c, - 0x26, 0x73, 0xc1, 0x58, 0x11, 0x65, 0xf7, 0xee, 0x08, 0x0f, 0x8f, 0x49, 0xa3, 0x07, 0xb0, 0xdc, - 0xa3, 0x5e, 0xcf, 0xec, 0x98, 0x02, 0xb1, 0xe5, 0x39, 0xb6, 0x75, 0x26, 0x93, 0xbd, 0x68, 0x5c, - 0x1d, 0x0e, 0x6a, 0xcb, 0xad, 0x51, 0xe6, 0xf9, 0xa0, 0xf6, 0x8a, 0xdc, 0x3a, 0x41, 0x89, 0x98, - 0x78, 0x1c, 0x46, 0xdf, 0x85, 0x42, 0xb3, 0x4f, 0x25, 0x05, 0x7d, 0x1b, 0x0a, 0x6d, 0xf5, 0x5b, - 0xed, 0xea, 0x6b, 0xc1, 0x9d, 0x14, 0xc8, 0x9c, 0x0f, 0x6a, 0x65, 0x71, 0xf5, 0xd6, 0x03, 0x02, - 0x0e, 0x55, 0xf4, 0x87, 0x50, 0xde, 0x39, 0xed, 0x79, 0x94, 0x07, 0xe7, 0xf5, 0x06, 0xe4, 0x89, - 0x24, 0x48, 0xb4, 0x42, 0x54, 0x48, 0x7d, 0x31, 0xac, 0xb8, 0x22, 0xb1, 0xc9, 0xa9, 0x69, 0x71, - 0x55, 0x11, 0xc3, 0xc4, 0xde, 0x11, 0x44, 0xec, 0xf3, 0xf4, 0x27, 0x1a, 0xc0, 0x2d, 0x12, 0x62, - 0x6f, 0xc1, 0x62, 0x90, 0x14, 0xc9, 0x5c, 0xfd, 0xbc, 0xd2, 0x5e, 0xc4, 0x49, 0x36, 0x1e, 0x95, - 0x47, 0x2d, 0x58, 0xb1, 0x5d, 0xcb, 0xe9, 0xb7, 0xc9, 0x3d, 0xd7, 0x76, 0x6d, 0x6e, 0x9b, 0x8e, - 0xfd, 0x93, 0xb0, 0x2e, 0x7f, 0x41, 0xe1, 0xac, 0xec, 0x5e, 0x20, 0x83, 0x2f, 0xd4, 0xd4, 0x1f, - 0x42, 0x51, 0x56, 0x08, 0x51, 0x9c, 0xa3, 0x72, 0xa5, 0xbd, 0xa0, 0x5c, 0x05, 0xd5, 0x3d, 0x33, - 0xa9, 0xba, 0xc7, 0x12, 0xc2, 0x81, 0xb2, 0xaf, 0x1b, 0x5c, 0x38, 0xa9, 0x2c, 0x5c, 0x85, 0x42, - 0xb0, 0x70, 0x65, 0x25, 0x6c, 0x34, 0x02, 0x20, 0x1c, 0x4a, 0xc4, 0xac, 0x1d, 0x41, 0xa2, 0xda, - 0xa5, 0x33, 0x16, 0xab, 0xbe, 0x99, 0x17, 0x57, 0xdf, 0x98, 0xa5, 0x9f, 0x41, 0x65, 0x52, 0x77, - 0xf2, 0x12, 0xf5, 0x38, 0xbd, 0x2b, 0xfa, 0xaf, 0x35, 0x58, 0x8a, 0x23, 0xa5, 0x3f, 0xbe, 0xf4, - 0x46, 0x2e, 0xbf, 0xc7, 0x63, 0x3b, 0xf2, 0x5b, 0x0d, 0x56, 0x12, 0x4b, 0x9b, 0xea, 0xc4, 0xa7, - 0x70, 0x2a, 0x1e, 0x1c, 0xd9, 0x29, 0x82, 0xa3, 0x01, 0xa5, 0xdd, 0x30, 0xee, 0xe9, 0xe5, 0x9d, - 0x8f, 0xfe, 0x17, 0x0d, 0xe6, 0x63, 0x1a, 0x0c, 0x3d, 0x84, 0x39, 0x51, 0xdf, 0x6c, 0xb7, 0xa3, - 0xba, 0xb2, 0x94, 0x97, 0x65, 0x0c, 0x24, 0x5a, 0x57, 0xcb, 0x47, 0xc2, 0x01, 0x24, 0x6a, 0x41, - 0x9e, 0x12, 0xd6, 0x77, 0xb8, 0x2a, 0xed, 0x57, 0x53, 0x5e, 0x6b, 0xdc, 0xe4, 0x7d, 0x66, 0x80, - 0xa8, 0x51, 0x58, 0xea, 0x63, 0x85, 0xa3, 0xff, 0x3d, 0x03, 0xe5, 0xdb, 0xe6, 0x01, 0x71, 0xf6, - 0x89, 0x43, 0x2c, 0xee, 0x51, 0xf4, 0x53, 0x28, 0x75, 0x4d, 0x6e, 0x1d, 0x49, 0x6a, 0xd0, 0x5b, - 0x36, 0xd3, 0x19, 0x4a, 0x20, 0xd5, 0xf7, 0x22, 0x98, 0x1d, 0x97, 0xd3, 0x33, 0xe3, 0x15, 0xb5, - 0xb0, 0x52, 0x8c, 0x83, 0xe3, 0xd6, 0xe4, 0x83, 0x40, 0x7e, 0xef, 0x9c, 0xf6, 0xc4, 0x25, 0x3a, - 0xfd, 0x3b, 0x24, 0xe1, 0x02, 0x26, 0xef, 0xf7, 0x6d, 0x4a, 0xba, 0xc4, 0xe5, 0xd1, 0x83, 0x60, - 0x6f, 0x04, 0x1f, 0x8f, 0x59, 0x5c, 0xbd, 0x01, 0x4b, 0xa3, 0xce, 0xa3, 0x25, 0xc8, 0x1e, 0x93, - 0x33, 0x3f, 0x16, 0xb0, 0xf8, 0x89, 0x56, 0x20, 0x77, 0x62, 0x3a, 0x7d, 0x55, 0x7f, 0xb0, 0xff, - 0x71, 0x3d, 0x73, 0x4d, 0xd3, 0x7f, 0xaf, 0x41, 0x65, 0x92, 
0x23, 0xe8, 0x8b, 0x31, 0x20, 0xa3, - 0xa4, 0xbc, 0xca, 0xbe, 0x4b, 0xce, 0x7c, 0xd4, 0x1d, 0x28, 0x78, 0x3d, 0xf1, 0x84, 0xf3, 0xa8, - 0x8a, 0xf3, 0x37, 0x83, 0xd8, 0xbd, 0xab, 0xe8, 0xe7, 0x83, 0xda, 0x95, 0x04, 0x7c, 0xc0, 0xc0, - 0xa1, 0x2a, 0xd2, 0x21, 0x2f, 0xfd, 0x11, 0x97, 0xb2, 0x68, 0x9f, 0xe4, 0xe1, 0xdf, 0x97, 0x14, - 0xac, 0x38, 0xfa, 0x9f, 0x34, 0x98, 0x95, 0xed, 0xe1, 0x43, 0x28, 0x88, 0xfd, 0x6b, 0x9b, 0xdc, - 0x94, 0x7e, 0xa5, 0x7e, 0x4c, 0x08, 0xed, 0x3d, 0xc2, 0xcd, 0x28, 0xbf, 0x02, 0x0a, 0x0e, 0x11, - 0x11, 0x86, 0x9c, 0xcd, 0x49, 0x37, 0x38, 0xc8, 0xb7, 0x26, 0x42, 0xab, 0xf7, 0x6f, 0x1d, 0x9b, - 0x8f, 0x76, 0x4e, 0x39, 0x71, 0xc5, 0x61, 0x44, 0xc5, 0x60, 0x57, 0x60, 0x60, 0x1f, 0x4a, 0xff, - 0x83, 0x06, 0xa1, 0x29, 0x91, 0xee, 0x8c, 0x38, 0x87, 0xb7, 0x6d, 0xf7, 0x58, 0x6d, 0x6b, 0xe8, - 0xce, 0xbe, 0xa2, 0xe3, 0x50, 0xe2, 0xa2, 0x2b, 0x36, 0x33, 0xe5, 0x15, 0x7b, 0x15, 0x0a, 0x96, - 0xe7, 0x72, 0xdb, 0xed, 0x8f, 0xd5, 0x97, 0x6d, 0x45, 0xc7, 0xa1, 0x84, 0xfe, 0x2c, 0x0b, 0x25, - 0xe1, 0x6b, 0x70, 0xc7, 0x7f, 0x13, 0xca, 0x4e, 0xfc, 0xf4, 0x94, 0xcf, 0x57, 0x14, 0x44, 0x32, - 0x1f, 0x71, 0x52, 0x56, 0x28, 0x1f, 0xda, 0xc4, 0x69, 0x87, 0xca, 0x99, 0xa4, 0xf2, 0xcd, 0x38, - 0x13, 0x27, 0x65, 0x45, 0x9d, 0x7d, 0x24, 0xe2, 0x5a, 0x35, 0x6a, 0xe1, 0xd6, 0x7e, 0x4f, 0x10, - 0xb1, 0xcf, 0xbb, 0x68, 0x7f, 0x66, 0xa7, 0xdc, 0x9f, 0xeb, 0xb0, 0x20, 0x0e, 0xd2, 0xeb, 0xf3, - 0xa0, 0x9b, 0xcd, 0xc9, 0xbe, 0x0b, 0x0d, 0x07, 0xb5, 0x85, 0xf7, 0x12, 0x1c, 0x3c, 0x22, 0x39, - 0xb1, 0x7d, 0xc9, 0x7f, 0xda, 0xf6, 0x45, 0xac, 0xda, 0xb1, 0xbb, 0x36, 0xaf, 0xcc, 0x49, 0x27, - 0xc2, 0x55, 0xdf, 0x16, 0x44, 0xec, 0xf3, 0x12, 0x47, 0x5a, 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2, - 0x9e, 0x6d, 0x51, 0x4f, 0xac, 0x45, 0x5c, 0x4c, 0x2c, 0xd1, 0xb4, 0x87, 0x05, 0x3c, 0x58, 0x63, - 0xc0, 0x17, 0xae, 0xb8, 0xa6, 0xeb, 0xf9, 0xad, 0x79, 0x2e, 0x72, 0xe5, 0x8e, 0x20, 0x62, 0x9f, - 0x77, 0x7d, 0x45, 0xdc, 0x47, 0xbf, 0x7c, 0x52, 0x9b, 0x79, 0xfc, 0xa4, 0x36, 0xf3, 0xe1, 0x13, - 0x75, 0x37, 0xfd, 0x0b, 0x00, 0xee, 0x1e, 0xfc, 0x98, 0x58, 0x7e, 0xcc, 0x5f, 0xfe, 0x2a, 0x17, - 0x3d, 0x86, 0x1a, 0x06, 0xc9, 0x17, 0x6c, 0x66, 0xa4, 0xc7, 0x88, 0xf1, 0x70, 0x42, 0x12, 0x35, - 0xa0, 0x18, 0xbe, 0xd4, 0x55, 0x7c, 0x2f, 0x2b, 0xb5, 0x62, 0xf8, 0x9c, 0xc7, 0x91, 0x4c, 0x22, - 0x01, 0x67, 0x2f, 0x4d, 0x40, 0x03, 0xb2, 0x7d, 0xbb, 0x2d, 0x43, 0xa2, 0x68, 0x7c, 0x35, 0x28, - 0x80, 0xf7, 0x76, 0x9b, 0xe7, 0x83, 0xda, 0x6b, 0x93, 0x66, 0x5c, 0xfc, 0xac, 0x47, 0x58, 0xfd, - 0xde, 0x6e, 0x13, 0x0b, 0xe5, 0x8b, 0x82, 0x34, 0x3f, 0x65, 0x90, 0x6e, 0x02, 0xa8, 0x55, 0x0b, - 0x6d, 0x3f, 0x36, 0xc2, 0xa9, 0xc5, 0xad, 0x90, 0x83, 0x63, 0x52, 0x88, 0xc1, 0xb2, 0x45, 0x89, - 0xfc, 0x2d, 0x8e, 0x9e, 0x71, 0xb3, 0xeb, 0xbf, 0xdb, 0x4b, 0x9b, 0x5f, 0x4e, 0x57, 0x31, 0x85, - 0x9a, 0xf1, 0xaa, 0x32, 0xb3, 0xbc, 0x3d, 0x0a, 0x86, 0xc7, 0xf1, 0x91, 0x07, 0xcb, 0x6d, 0xf5, - 0xea, 0x89, 0x8c, 0x16, 0xa7, 0x36, 0x7a, 0x45, 0x18, 0x6c, 0x8e, 0x02, 0xe1, 0x71, 0x6c, 0xf4, - 0x43, 0x58, 0x0d, 0x88, 0xe3, 0x4f, 0xcf, 0x0a, 0xc8, 0x9d, 0xaa, 0x8a, 0xc7, 0x70, 0x73, 0xa2, - 0x14, 0x7e, 0x01, 0x02, 0x6a, 0x43, 0xde, 0xf1, 0xbb, 0x8b, 0x92, 0xbc, 0x11, 0xbe, 0x95, 0x6e, - 0x15, 0x51, 0xf4, 0xd7, 0xe3, 0x5d, 0x45, 0xf8, 0xfc, 0x52, 0x0d, 0x85, 0xc2, 0x46, 0xa7, 0x50, - 0x32, 0x5d, 0xd7, 0xe3, 0xa6, 0xff, 0x18, 0x9e, 0x97, 0xa6, 0xb6, 0xa6, 0x36, 0xb5, 0x15, 0x61, - 0x8c, 0x74, 0x31, 0x31, 0x0e, 0x8e, 0x9b, 0x42, 0x8f, 0x60, 0xd1, 0x7b, 0xe4, 0x12, 0x8a, 0xc9, - 0x21, 0xa1, 0xc4, 0xb5, 0x08, 0xab, 0x94, 0xa5, 0xf5, 0xaf, 0xa5, 0xb4, 0x9e, 0x50, 
0x8e, 0x42, - 0x3a, 0x49, 0x67, 0x78, 0xd4, 0x0a, 0xaa, 0x03, 0x1c, 0xda, 0xae, 0xea, 0x45, 0x2b, 0x0b, 0xd1, - 0xe8, 0xe9, 0x66, 0x48, 0xc5, 0x31, 0x09, 0xf4, 0x75, 0x28, 0x59, 0x4e, 0x9f, 0x71, 0xe2, 0xcf, - 0xb8, 0x16, 0x65, 0x06, 0x85, 0xeb, 0xdb, 0x8e, 0x58, 0x38, 0x2e, 0x87, 0x8e, 0x60, 0xde, 0x8e, - 0x35, 0xbd, 0x95, 0x25, 0x19, 0x8b, 0x9b, 0x53, 0x77, 0xba, 0xcc, 0x58, 0x12, 0x95, 0x28, 0x4e, - 0xc1, 0x09, 0xe4, 0xd5, 0x6f, 0x40, 0xe9, 0x53, 0xf6, 0x60, 0xa2, 0x87, 0x1b, 0x3d, 0xba, 0xa9, - 0x7a, 0xb8, 0xbf, 0x66, 0x60, 0x21, 0xb9, 0xe1, 0xe1, 0x5b, 0x47, 0x9b, 0x38, 0xb3, 0x0c, 0xaa, - 0x72, 0x76, 0x62, 0x55, 0x56, 0xc5, 0x6f, 0xf6, 0x65, 0x8a, 0xdf, 0x26, 0x80, 0xd9, 0xb3, 0x83, - 0xba, 0xe7, 0xd7, 0xd1, 0xb0, 0x72, 0x45, 0x53, 0x34, 0x1c, 0x93, 0x92, 0x53, 0x49, 0xcf, 0xe5, - 0xd4, 0x73, 0x1c, 0x42, 0xd5, 0x65, 0xea, 0x4f, 0x25, 0x43, 0x2a, 0x8e, 0x49, 0xa0, 0x9b, 0x80, - 0x0e, 0x1c, 0xcf, 0x3a, 0x96, 0x5b, 0x10, 0xe4, 0xb9, 0xac, 0x92, 0x05, 0x7f, 0x28, 0x65, 0x8c, - 0x71, 0xf1, 0x05, 0x1a, 0xfa, 0x5d, 0x48, 0x8e, 0x91, 0xd0, 0x0d, 0x7f, 0x03, 0xb4, 0x70, 0xce, - 0x33, 0xdd, 0xe2, 0xf5, 0xab, 0x50, 0xc4, 0x9e, 0xc7, 0x5b, 0x26, 0x3f, 0x62, 0xa8, 0x06, 0xb9, - 0x9e, 0xf8, 0xa1, 0x66, 0x84, 0x72, 0xec, 0x2b, 0x39, 0xd8, 0xa7, 0xeb, 0xbf, 0xd2, 0xe0, 0xd5, - 0x89, 0x23, 0x3b, 0xb1, 0x91, 0x56, 0xf8, 0xa5, 0x5c, 0x0a, 0x37, 0x32, 0x92, 0xc3, 0x31, 0x29, - 0xd1, 0x80, 0x25, 0xe6, 0x7c, 0xa3, 0x0d, 0x58, 0xc2, 0x1a, 0x4e, 0xca, 0xea, 0xff, 0xce, 0x40, - 0xde, 0x7f, 0x8d, 0xfd, 0x97, 0x7b, 0xee, 0x37, 0x20, 0xcf, 0xa4, 0x1d, 0xe5, 0x5e, 0x58, 0x24, - 0x7d, 0xeb, 0x58, 0x71, 0x45, 0xef, 0xd2, 0x25, 0x8c, 0x99, 0x9d, 0x20, 0x66, 0xc3, 0xde, 0x65, - 0xcf, 0x27, 0xe3, 0x80, 0x8f, 0xde, 0x11, 0x8f, 0x4f, 0x93, 0x85, 0xed, 0x60, 0x35, 0x80, 0xc4, - 0x92, 0x7a, 0x3e, 0xa8, 0xcd, 0x2b, 0x70, 0xf9, 0x8d, 0x95, 0x34, 0x7a, 0x00, 0x73, 0x6d, 0xc2, - 0x4d, 0xdb, 0xf1, 0xbb, 0xc0, 0xd4, 0x03, 0x49, 0x1f, 0xac, 0xe9, 0xab, 0x1a, 0x25, 0xe1, 0x93, - 0xfa, 0xc0, 0x01, 0xa0, 0xc8, 0x37, 0xcb, 0x6b, 0xfb, 0xd3, 0xf9, 0x5c, 0x94, 0x6f, 0xdb, 0x5e, - 0x9b, 0x60, 0xc9, 0xd1, 0x1f, 0x6b, 0x50, 0xf2, 0x91, 0xb6, 0xcd, 0x3e, 0x23, 0x68, 0x23, 0x5c, - 0x85, 0x7f, 0xdc, 0xc1, 0x55, 0x3c, 0xfb, 0xde, 0x59, 0x8f, 0x9c, 0x0f, 0x6a, 0x45, 0x29, 0x26, - 0x3e, 0xc2, 0x05, 0xc4, 0xf6, 0x28, 0x73, 0xc9, 0x1e, 0xbd, 0x0e, 0x39, 0xd9, 0x71, 0xab, 0xcd, - 0x0c, 0xfb, 0x3b, 0xd9, 0x95, 0x63, 0x9f, 0xa7, 0x7f, 0x9c, 0x81, 0x72, 0x62, 0x71, 0x29, 0x9a, - 0xb9, 0x70, 0x42, 0x92, 0x49, 0x31, 0x75, 0x9b, 0xfc, 0x3f, 0x95, 0xef, 0x43, 0xde, 0x12, 0xeb, - 0x0b, 0xfe, 0xa9, 0xb5, 0x31, 0xcd, 0x51, 0xc8, 0x9d, 0x89, 0x22, 0x49, 0x7e, 0x32, 0xac, 0x00, - 0xd1, 0x2d, 0x58, 0xa6, 0x84, 0xd3, 0xb3, 0xad, 0x43, 0x4e, 0x68, 0xbc, 0xed, 0xcf, 0x45, 0xed, - 0x0e, 0x1e, 0x15, 0xc0, 0xe3, 0x3a, 0x41, 0x85, 0xcc, 0xbf, 0x44, 0x85, 0xd4, 0x1d, 0x98, 0xfd, - 0x1f, 0xb6, 0xe6, 0x3f, 0x80, 0x62, 0xd4, 0x3c, 0x7d, 0xc6, 0x26, 0xf5, 0x1f, 0x41, 0x41, 0x44, - 0x63, 0xd0, 0xf4, 0x5f, 0x72, 0x01, 0x25, 0xaf, 0x86, 0x4c, 0x9a, 0xab, 0x41, 0xdf, 0x04, 0xff, - 0x5f, 0x65, 0xa2, 0x9a, 0xfa, 0x0f, 0xf5, 0x58, 0x35, 0x8d, 0xbf, 0xba, 0x63, 0x93, 0xb2, 0x5f, - 0x68, 0x00, 0xf2, 0xd5, 0xb8, 0x73, 0x42, 0x5c, 0x2e, 0x1c, 0x13, 0x27, 0x30, 0xea, 0x98, 0x4c, - 0x23, 0xc9, 0x41, 0xf7, 0x20, 0xef, 0xc9, 0xa6, 0x4a, 0x8d, 0xae, 0xa6, 0x9c, 0x02, 0x84, 0x51, - 0xe7, 0x77, 0x66, 0x58, 0x81, 0x19, 0xeb, 0x4f, 0x9f, 0x57, 0x67, 0x9e, 0x3d, 0xaf, 0xce, 0x7c, - 0xf4, 0xbc, 0x3a, 0xf3, 0xc1, 0xb0, 0xaa, 0x3d, 0x1d, 0x56, 0xb5, 0x67, 0xc3, 0xaa, 0xf6, 0xd1, - 0xb0, 0xaa, 
0x7d, 0x3c, 0xac, 0x6a, 0x8f, 0x3f, 0xa9, 0xce, 0x3c, 0xc8, 0x9c, 0x6c, 0xfc, 0x27, - 0x00, 0x00, 0xff, 0xff, 0x66, 0xe7, 0x2a, 0x84, 0x4b, 0x20, 0x00, 0x00, + 0xf6, 0x3e, 0xb1, 0x3c, 0xb7, 0xcd, 0xe4, 0x16, 0x65, 0x8d, 0xcf, 0x0d, 0x07, 0x35, 0x74, 0x6b, + 0x8c, 0x8b, 0x2f, 0xd0, 0x40, 0x0e, 0x94, 0x7b, 0x54, 0xfe, 0xb6, 0xb9, 0x2a, 0xe0, 0xe2, 0xe0, + 0xbc, 0x9d, 0x6e, 0xed, 0xad, 0xb8, 0xaa, 0xb1, 0x3c, 0x1c, 0xd4, 0xca, 0x09, 0x12, 0x4e, 0x82, + 0xa3, 0xef, 0xc2, 0x92, 0x47, 0x7b, 0x47, 0xa6, 0xdb, 0x24, 0x3d, 0xe2, 0xb6, 0x89, 0xcb, 0x99, + 0x3c, 0xcc, 0x05, 0x63, 0x45, 0x94, 0xdd, 0xbb, 0x23, 0x3c, 0x3c, 0x26, 0x8d, 0x1e, 0xc0, 0x72, + 0x8f, 0x7a, 0x3d, 0xb3, 0x63, 0x0a, 0xc4, 0x96, 0xe7, 0xd8, 0xd6, 0x99, 0x3c, 0xec, 0x45, 0xe3, + 0xea, 0x70, 0x50, 0x5b, 0x6e, 0x8d, 0x32, 0xcf, 0x07, 0xb5, 0x57, 0x64, 0xe8, 0x04, 0x25, 0x62, + 0xe2, 0x71, 0x18, 0x7d, 0x17, 0x0a, 0xcd, 0x3e, 0x95, 0x14, 0xf4, 0x6d, 0x28, 0xb4, 0xd5, 0x6f, + 0x15, 0xd5, 0xd7, 0x82, 0x3b, 0x29, 0x90, 0x39, 0x1f, 0xd4, 0xca, 0xe2, 0xea, 0xad, 0x07, 0x04, + 0x1c, 0xaa, 0xe8, 0x0f, 0xa1, 0xbc, 0x73, 0xda, 0xf3, 0x28, 0x0f, 0xf6, 0xeb, 0x0d, 0xc8, 0x13, + 0x49, 0x90, 0x68, 0x85, 0xa8, 0x90, 0xfa, 0x62, 0x58, 0x71, 0xc5, 0xc1, 0x26, 0xa7, 0xa6, 0xc5, + 0x55, 0x45, 0x0c, 0x0f, 0xf6, 0x8e, 0x20, 0x62, 0x9f, 0xa7, 0x3f, 0xd1, 0x00, 0x6e, 0x91, 0x10, + 0x7b, 0x0b, 0x16, 0x83, 0x43, 0x91, 0x3c, 0xab, 0x9f, 0x57, 0xda, 0x8b, 0x38, 0xc9, 0xc6, 0xa3, + 0xf2, 0xa8, 0x05, 0x2b, 0xb6, 0x6b, 0x39, 0xfd, 0x36, 0xb9, 0xe7, 0xda, 0xae, 0xcd, 0x6d, 0xd3, + 0xb1, 0x7f, 0x12, 0xd6, 0xe5, 0x2f, 0x28, 0x9c, 0x95, 0xdd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa, + 0x43, 0x28, 0xca, 0x0a, 0x21, 0x8a, 0x73, 0x54, 0xae, 0xb4, 0x17, 0x94, 0xab, 0xa0, 0xba, 0x67, + 0x26, 0x55, 0xf7, 0xd8, 0x81, 0x70, 0xa0, 0xec, 0xeb, 0x06, 0x17, 0x4e, 0x2a, 0x0b, 0x57, 0xa1, + 0x10, 0x2c, 0x5c, 0x59, 0x09, 0x1b, 0x8d, 0x00, 0x08, 0x87, 0x12, 0x31, 0x6b, 0x47, 0x90, 0xa8, + 0x76, 0xe9, 0x8c, 0xc5, 0xaa, 0x6f, 0xe6, 0xc5, 0xd5, 0x37, 0x66, 0xe9, 0x67, 0x50, 0x99, 0xd4, + 0x9d, 0xbc, 0x44, 0x3d, 0x4e, 0xef, 0x8a, 0xfe, 0x6b, 0x0d, 0x96, 0xe2, 0x48, 0xe9, 0xb7, 0x2f, + 0xbd, 0x91, 0xcb, 0xef, 0xf1, 0x58, 0x44, 0x7e, 0xab, 0xc1, 0x4a, 0x62, 0x69, 0x53, 0xed, 0xf8, + 0x14, 0x4e, 0xc5, 0x93, 0x23, 0x3b, 0x45, 0x72, 0x34, 0xa0, 0xb4, 0x1b, 0xe6, 0x3d, 0xbd, 0xbc, + 0xf3, 0xd1, 0xff, 0xa2, 0xc1, 0x7c, 0x4c, 0x83, 0xa1, 0x87, 0x30, 0x27, 0xea, 0x9b, 0xed, 0x76, + 0x54, 0x57, 0x96, 0xf2, 0xb2, 0x8c, 0x81, 0x44, 0xeb, 0x6a, 0xf9, 0x48, 0x38, 0x80, 0x44, 0x2d, + 0xc8, 0x53, 0xc2, 0xfa, 0x0e, 0x57, 0xa5, 0xfd, 0x6a, 0xca, 0x6b, 0x8d, 0x9b, 0xbc, 0xcf, 0x0c, + 0x10, 0x35, 0x0a, 0x4b, 0x7d, 0xac, 0x70, 0xf4, 0xbf, 0x67, 0xa0, 0x7c, 0xdb, 0x3c, 0x20, 0xce, + 0x3e, 0x71, 0x88, 0xc5, 0x3d, 0x8a, 0x7e, 0x0a, 0xa5, 0xae, 0xc9, 0xad, 0x23, 0x49, 0x0d, 0x7a, + 0xcb, 0x66, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x5e, 0x04, 0xb3, 0xe3, 0x72, 0x7a, 0x66, 0xbc, 0xa2, + 0x16, 0x56, 0x8a, 0x71, 0x70, 0xdc, 0x9a, 0x7c, 0x10, 0xc8, 0xef, 0x9d, 0xd3, 0x9e, 0xb8, 0x44, + 0xa7, 0x7f, 0x87, 0x24, 0x5c, 0xc0, 0xe4, 0xfd, 0xbe, 0x4d, 0x49, 0x97, 0xb8, 0x3c, 0x7a, 0x10, + 0xec, 0x8d, 0xe0, 0xe3, 0x31, 0x8b, 0xab, 0x37, 0x60, 0x69, 0xd4, 0x79, 0xb4, 0x04, 0xd9, 0x63, + 0x72, 0xe6, 0xe7, 0x02, 0x16, 0x3f, 0xd1, 0x0a, 0xe4, 0x4e, 0x4c, 0xa7, 0xaf, 0xea, 0x0f, 0xf6, + 0x3f, 0xae, 0x67, 0xae, 0x69, 0xfa, 0xef, 0x35, 0xa8, 0x4c, 0x72, 0x04, 0x7d, 0x31, 0x06, 0x64, + 0x94, 0x94, 0x57, 0xd9, 0x77, 0xc9, 0x99, 0x8f, 0xba, 0x03, 0x05, 0xaf, 0x27, 0x9e, 0x70, 0x1e, + 0x55, 0x79, 0xfe, 0x66, 0x90, 0xbb, 0x77, 0x15, 0xfd, 0x7c, 
0x50, 0xbb, 0x92, 0x80, 0x0f, 0x18, + 0x38, 0x54, 0x45, 0x3a, 0xe4, 0xa5, 0x3f, 0xe2, 0x52, 0x16, 0xed, 0x93, 0xdc, 0xfc, 0xfb, 0x92, + 0x82, 0x15, 0x47, 0xff, 0x93, 0x06, 0xb3, 0xb2, 0x3d, 0x7c, 0x08, 0x05, 0x11, 0xbf, 0xb6, 0xc9, + 0x4d, 0xe9, 0x57, 0xea, 0xc7, 0x84, 0xd0, 0xde, 0x23, 0xdc, 0x8c, 0xce, 0x57, 0x40, 0xc1, 0x21, + 0x22, 0xc2, 0x90, 0xb3, 0x39, 0xe9, 0x06, 0x1b, 0xf9, 0xd6, 0x44, 0x68, 0xf5, 0xfe, 0xad, 0x63, + 0xf3, 0xd1, 0xce, 0x29, 0x27, 0xae, 0xd8, 0x8c, 0xa8, 0x18, 0xec, 0x0a, 0x0c, 0xec, 0x43, 0xe9, + 0x7f, 0xd0, 0x20, 0x34, 0x25, 0x8e, 0x3b, 0x23, 0xce, 0xe1, 0x6d, 0xdb, 0x3d, 0x56, 0x61, 0x0d, + 0xdd, 0xd9, 0x57, 0x74, 0x1c, 0x4a, 0x5c, 0x74, 0xc5, 0x66, 0xa6, 0xbc, 0x62, 0xaf, 0x42, 0xc1, + 0xf2, 0x5c, 0x6e, 0xbb, 0xfd, 0xb1, 0xfa, 0xb2, 0xad, 0xe8, 0x38, 0x94, 0xd0, 0x9f, 0x65, 0xa1, + 0x24, 0x7c, 0x0d, 0xee, 0xf8, 0x6f, 0x42, 0xd9, 0x89, 0xef, 0x9e, 0xf2, 0xf9, 0x8a, 0x82, 0x48, + 0x9e, 0x47, 0x9c, 0x94, 0x15, 0xca, 0x87, 0x36, 0x71, 0xda, 0xa1, 0x72, 0x26, 0xa9, 0x7c, 0x33, + 0xce, 0xc4, 0x49, 0x59, 0x51, 0x67, 0x1f, 0x89, 0xbc, 0x56, 0x8d, 0x5a, 0x18, 0xda, 0xef, 0x09, + 0x22, 0xf6, 0x79, 0x17, 0xc5, 0x67, 0x76, 0xca, 0xf8, 0x5c, 0x87, 0x05, 0xb1, 0x91, 0x5e, 0x9f, + 0x07, 0xdd, 0x6c, 0x4e, 0xf6, 0x5d, 0x68, 0x38, 0xa8, 0x2d, 0xbc, 0x97, 0xe0, 0xe0, 0x11, 0xc9, + 0x89, 0xed, 0x4b, 0xfe, 0xd3, 0xb6, 0x2f, 0x62, 0xd5, 0x8e, 0xdd, 0xb5, 0x79, 0x65, 0x4e, 0x3a, + 0x11, 0xae, 0xfa, 0xb6, 0x20, 0x62, 0x9f, 0x97, 0xd8, 0xd2, 0xc2, 0xa5, 0x5b, 0xfa, 0x3e, 0x14, + 0xf7, 0x6c, 0x8b, 0x7a, 0x62, 0x2d, 0xe2, 0x62, 0x62, 0x89, 0xa6, 0x3d, 0x2c, 0xe0, 0xc1, 0x1a, + 0x03, 0xbe, 0x70, 0xc5, 0x35, 0x5d, 0xcf, 0x6f, 0xcd, 0x73, 0x91, 0x2b, 0x77, 0x04, 0x11, 0xfb, + 0xbc, 0xeb, 0x2b, 0xe2, 0x3e, 0xfa, 0xe5, 0x93, 0xda, 0xcc, 0xe3, 0x27, 0xb5, 0x99, 0x0f, 0x9f, + 0xa8, 0xbb, 0xe9, 0x5f, 0x00, 0x70, 0xf7, 0xe0, 0xc7, 0xc4, 0xf2, 0x73, 0xfe, 0xf2, 0x57, 0xb9, + 0xe8, 0x31, 0xd4, 0x30, 0x48, 0xbe, 0x60, 0x33, 0x23, 0x3d, 0x46, 0x8c, 0x87, 0x13, 0x92, 0xa8, + 0x01, 0xc5, 0xf0, 0xa5, 0xae, 0xf2, 0x7b, 0x59, 0xa9, 0x15, 0xc3, 0xe7, 0x3c, 0x8e, 0x64, 0x12, + 0x07, 0x70, 0xf6, 0xd2, 0x03, 0x68, 0x40, 0xb6, 0x6f, 0xb7, 0x65, 0x4a, 0x14, 0x8d, 0xaf, 0x06, + 0x05, 0xf0, 0xde, 0x6e, 0xf3, 0x7c, 0x50, 0x7b, 0x6d, 0xd2, 0x8c, 0x8b, 0x9f, 0xf5, 0x08, 0xab, + 0xdf, 0xdb, 0x6d, 0x62, 0xa1, 0x7c, 0x51, 0x92, 0xe6, 0xa7, 0x4c, 0xd2, 0x4d, 0x00, 0xb5, 0x6a, + 0xa1, 0xed, 0xe7, 0x46, 0x38, 0xb5, 0xb8, 0x15, 0x72, 0x70, 0x4c, 0x0a, 0x31, 0x58, 0xb6, 0x28, + 0x91, 0xbf, 0xc5, 0xd6, 0x33, 0x6e, 0x76, 0xfd, 0x77, 0x7b, 0x69, 0xf3, 0xcb, 0xe9, 0x2a, 0xa6, + 0x50, 0x33, 0x5e, 0x55, 0x66, 0x96, 0xb7, 0x47, 0xc1, 0xf0, 0x38, 0x3e, 0xf2, 0x60, 0xb9, 0xad, + 0x5e, 0x3d, 0x91, 0xd1, 0xe2, 0xd4, 0x46, 0xaf, 0x08, 0x83, 0xcd, 0x51, 0x20, 0x3c, 0x8e, 0x8d, + 0x7e, 0x08, 0xab, 0x01, 0x71, 0xfc, 0xe9, 0x59, 0x01, 0x19, 0xa9, 0xaa, 0x78, 0x0c, 0x37, 0x27, + 0x4a, 0xe1, 0x17, 0x20, 0xa0, 0x36, 0xe4, 0x1d, 0xbf, 0xbb, 0x28, 0xc9, 0x1b, 0xe1, 0x5b, 0xe9, + 0x56, 0x11, 0x65, 0x7f, 0x3d, 0xde, 0x55, 0x84, 0xcf, 0x2f, 0xd5, 0x50, 0x28, 0x6c, 0x74, 0x0a, + 0x25, 0xd3, 0x75, 0x3d, 0x6e, 0xfa, 0x8f, 0xe1, 0x79, 0x69, 0x6a, 0x6b, 0x6a, 0x53, 0x5b, 0x11, + 0xc6, 0x48, 0x17, 0x13, 0xe3, 0xe0, 0xb8, 0x29, 0xf4, 0x08, 0x16, 0xbd, 0x47, 0x2e, 0xa1, 0x98, + 0x1c, 0x12, 0x4a, 0x5c, 0x8b, 0xb0, 0x4a, 0x59, 0x5a, 0xff, 0x5a, 0x4a, 0xeb, 0x09, 0xe5, 0x28, + 0xa5, 0x93, 0x74, 0x86, 0x47, 0xad, 0xa0, 0x3a, 0xc0, 0xa1, 0xed, 0xaa, 0x5e, 0xb4, 0xb2, 0x10, + 0x8d, 0x9e, 0x6e, 0x86, 0x54, 0x1c, 0x93, 0x40, 0x5f, 0x87, 0x92, 0xe5, 0xf4, 0x19, 
0x27, 0xfe, + 0x8c, 0x6b, 0x51, 0x9e, 0xa0, 0x70, 0x7d, 0xdb, 0x11, 0x0b, 0xc7, 0xe5, 0xd0, 0x11, 0xcc, 0xdb, + 0xb1, 0xa6, 0xb7, 0xb2, 0x24, 0x73, 0x71, 0x73, 0xea, 0x4e, 0x97, 0x19, 0x4b, 0xa2, 0x12, 0xc5, + 0x29, 0x38, 0x81, 0xbc, 0xfa, 0x0d, 0x28, 0x7d, 0xca, 0x1e, 0x4c, 0xf4, 0x70, 0xa3, 0x5b, 0x37, + 0x55, 0x0f, 0xf7, 0xd7, 0x0c, 0x2c, 0x24, 0x03, 0x1e, 0xbe, 0x75, 0xb4, 0x89, 0x33, 0xcb, 0xa0, + 0x2a, 0x67, 0x27, 0x56, 0x65, 0x55, 0xfc, 0x66, 0x5f, 0xa6, 0xf8, 0x6d, 0x02, 0x98, 0x3d, 0x3b, + 0xa8, 0x7b, 0x7e, 0x1d, 0x0d, 0x2b, 0x57, 0x34, 0x45, 0xc3, 0x31, 0x29, 0x39, 0x95, 0xf4, 0x5c, + 0x4e, 0x3d, 0xc7, 0x21, 0x54, 0x5d, 0xa6, 0xfe, 0x54, 0x32, 0xa4, 0xe2, 0x98, 0x04, 0xba, 0x09, + 0xe8, 0xc0, 0xf1, 0xac, 0x63, 0x19, 0x82, 0xe0, 0x9c, 0xcb, 0x2a, 0x59, 0xf0, 0x87, 0x52, 0xc6, + 0x18, 0x17, 0x5f, 0xa0, 0xa1, 0xcf, 0x41, 0xae, 0x25, 0xda, 0x0a, 0xfd, 0x2e, 0x24, 0xe7, 0x49, + 0xe8, 0x86, 0x1f, 0x09, 0x2d, 0x1c, 0xf8, 0x4c, 0x17, 0x05, 0xfd, 0x2a, 0x14, 0xb1, 0xe7, 0xf1, + 0x96, 0xc9, 0x8f, 0x18, 0xaa, 0x41, 0xae, 0x27, 0x7e, 0xa8, 0x61, 0xa1, 0x9c, 0xff, 0x4a, 0x0e, + 0xf6, 0xe9, 0xfa, 0xaf, 0x34, 0x78, 0x75, 0xe2, 0xec, 0x4e, 0x44, 0xd4, 0x0a, 0xbf, 0x94, 0x4b, + 0x61, 0x44, 0x23, 0x39, 0x1c, 0x93, 0x12, 0x9d, 0x58, 0x62, 0xe0, 0x37, 0xda, 0x89, 0x25, 0xac, + 0xe1, 0xa4, 0xac, 0xfe, 0xef, 0x0c, 0xe4, 0xfd, 0x67, 0xd9, 0x7f, 0xb9, 0xf9, 0x7e, 0x03, 0xf2, + 0x4c, 0xda, 0x51, 0xee, 0x85, 0xd5, 0xd2, 0xb7, 0x8e, 0x15, 0x57, 0x34, 0x31, 0x5d, 0xc2, 0x98, + 0xd9, 0x09, 0x92, 0x37, 0x6c, 0x62, 0xf6, 0x7c, 0x32, 0x0e, 0xf8, 0xe8, 0x1d, 0xf1, 0x0a, 0x35, + 0x59, 0xd8, 0x17, 0x56, 0x03, 0x48, 0x2c, 0xa9, 0xe7, 0x83, 0xda, 0xbc, 0x02, 0x97, 0xdf, 0x58, + 0x49, 0xa3, 0x07, 0x30, 0xd7, 0x26, 0xdc, 0xb4, 0x1d, 0xbf, 0x1d, 0x4c, 0x3d, 0x99, 0xf4, 0xc1, + 0x9a, 0xbe, 0xaa, 0x51, 0x12, 0x3e, 0xa9, 0x0f, 0x1c, 0x00, 0x8a, 0x83, 0x67, 0x79, 0x6d, 0x7f, + 0x4c, 0x9f, 0x8b, 0x0e, 0xde, 0xb6, 0xd7, 0x26, 0x58, 0x72, 0xf4, 0xc7, 0x1a, 0x94, 0x7c, 0xa4, + 0x6d, 0xb3, 0xcf, 0x08, 0xda, 0x08, 0x57, 0xe1, 0x6f, 0x77, 0x70, 0x27, 0xcf, 0xbe, 0x77, 0xd6, + 0x23, 0xe7, 0x83, 0x5a, 0x51, 0x8a, 0x89, 0x8f, 0x70, 0x01, 0xb1, 0x18, 0x65, 0x2e, 0x89, 0xd1, + 0xeb, 0x90, 0x93, 0xad, 0xb7, 0x0a, 0x66, 0xd8, 0xe8, 0xc9, 0xf6, 0x1c, 0xfb, 0x3c, 0xfd, 0xe3, + 0x0c, 0x94, 0x13, 0x8b, 0x4b, 0xd1, 0xd5, 0x85, 0xa3, 0x92, 0x4c, 0x8a, 0xf1, 0xdb, 0xe4, 0x7f, + 0xae, 0x7c, 0x1f, 0xf2, 0x96, 0x58, 0x5f, 0xf0, 0xdf, 0xad, 0x8d, 0x69, 0xb6, 0x42, 0x46, 0x26, + 0xca, 0x24, 0xf9, 0xc9, 0xb0, 0x02, 0x44, 0xb7, 0x60, 0x99, 0x12, 0x4e, 0xcf, 0xb6, 0x0e, 0x39, + 0xa1, 0xf1, 0xfe, 0x3f, 0x17, 0xf5, 0x3d, 0x78, 0x54, 0x00, 0x8f, 0xeb, 0x04, 0xa5, 0x32, 0xff, + 0x12, 0xa5, 0x52, 0x77, 0x60, 0xf6, 0x7f, 0xd8, 0xa3, 0xff, 0x00, 0x8a, 0x51, 0x17, 0xf5, 0x19, + 0x9b, 0xd4, 0x7f, 0x04, 0x05, 0x91, 0x8d, 0x41, 0xf7, 0x7f, 0xc9, 0x4d, 0x94, 0xbc, 0x23, 0x32, + 0x69, 0xee, 0x08, 0x7d, 0x13, 0xfc, 0xff, 0x99, 0x89, 0x6a, 0xea, 0xbf, 0xd8, 0x63, 0xd5, 0x34, + 0xfe, 0xfc, 0x8e, 0x8d, 0xcc, 0x7e, 0xa1, 0x01, 0xc8, 0xe7, 0xe3, 0xce, 0x09, 0x71, 0xb9, 0x70, + 0x4c, 0xec, 0xc0, 0xa8, 0x63, 0xf2, 0x18, 0x49, 0x0e, 0xba, 0x07, 0x79, 0x4f, 0x76, 0x57, 0x6a, + 0x86, 0x35, 0xe5, 0x38, 0x20, 0xcc, 0x3a, 0xbf, 0x45, 0xc3, 0x0a, 0xcc, 0x58, 0x7f, 0xfa, 0xbc, + 0x3a, 0xf3, 0xec, 0x79, 0x75, 0xe6, 0xa3, 0xe7, 0xd5, 0x99, 0x0f, 0x86, 0x55, 0xed, 0xe9, 0xb0, + 0xaa, 0x3d, 0x1b, 0x56, 0xb5, 0x8f, 0x86, 0x55, 0xed, 0xe3, 0x61, 0x55, 0x7b, 0xfc, 0x49, 0x75, + 0xe6, 0x41, 0xe6, 0x64, 0xe3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x6c, 0xc5, 0x28, 0xb2, 0x54, + 0x20, 0x00, 
0x00, } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index a547d78d..ab23a071 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -149,6 +149,10 @@ message DeleteOptions { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional optional string propagationPolicy = 4; } @@ -382,7 +386,7 @@ message ListOptions { // more results are available. Servers may choose not to support the limit argument and will return // all of the available results. If limit is specified and the continue field is empty, clients may // assume that no more results are available. This field is not supported if watch is true. - // + // // The server guarantees that the objects returned when using continue will be identical to issuing // a single list call without a limit - that is, no objects created, modified, or deleted after the // first request is issued will be included in any subsequent continued requests. This is sometimes @@ -510,15 +514,16 @@ message ObjectMeta { // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. 
// // Populated by the system when a graceful deletion is requested. @@ -616,6 +621,10 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +message Patch { +} + // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. @@ -781,7 +790,7 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. -// +// // +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index 6e449a43..b300d370 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -70,7 +70,6 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) AddConversionFuncs(scheme) RegisterDefaults(scheme) } @@ -90,6 +89,5 @@ func init() { ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) RegisterDefaults(scheme) } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index 13ae66c6..c8ee4e5d 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -177,15 +177,16 @@ type ObjectMeta struct { // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This // field is set by the server when a graceful deletion is requested by the user, and is not // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field. Once set, - // this value may not be unset or be set further into the future, although it may be shortened - // or the resource may be deleted prior to this time. For example, a user may request that - // a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination - // signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard - // termination signal (SIGKILL) to the container and after cleanup, remove the pod from the - // API. In the presence of network partitions, this object may still exist after this - // timestamp, until an administrator or automated process can determine the resource is - // fully terminated. + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. 
After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. // // Populated by the system when a graceful deletion is requested. @@ -445,6 +446,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index 49d2de1e..5dbba4b0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -90,7 +90,7 @@ var map_DeleteOptions = map[string]string{ "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", } func (DeleteOptions) SwaggerDoc() map[string]string { @@ -214,7 +214,7 @@ var map_ObjectMeta = map[string]string{ "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. 
Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field. Once set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go new file mode 100644 index 00000000..8d035254 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -0,0 +1,453 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + gojson "encoding/json" + "errors" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" +) + +// NestedFieldCopy returns a deep copy of the value of a nested field. +// false is returned if the value is missing. +// nil, true is returned for a nil field. +func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + return runtime.DeepCopyJSONValue(val), true +} + +func nestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool) { + var val interface{} = obj + for _, field := range fields { + if m, ok := val.(map[string]interface{}); ok { + val, ok = m[field] + if !ok { + return nil, false + } + } else { + // Expected map[string]interface{}, got something else + return nil, false + } + } + return val, true +} + +// NestedString returns the string value of a nested field. +// Returns false if value is not found or is not a string. +func NestedString(obj map[string]interface{}, fields ...string) (string, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return "", false + } + s, ok := val.(string) + return s, ok +} + +// NestedBool returns the bool value of a nested field. +// Returns false if value is not found or is not a bool. +func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return false, false + } + b, ok := val.(bool) + return b, ok +} + +// NestedFloat64 returns the bool value of a nested field. +// Returns false if value is not found or is not a float64. 
+func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return 0, false + } + f, ok := val.(float64) + return f, ok +} + +// NestedInt64 returns the int64 value of a nested field. +// Returns false if value is not found or is not an int64. +func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return 0, false + } + i, ok := val.(int64) + return i, ok +} + +// NestedStringSlice returns a copy of []string value of a nested field. +// Returns false if value is not found, is not a []interface{} or contains non-string items in the slice. +func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + if m, ok := val.([]interface{}); ok { + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } else { + return nil, false + } + } + return strSlice, true + } + return nil, false +} + +// NestedSlice returns a deep copy of []interface{} value of a nested field. +// Returns false if value is not found or is not a []interface{}. +func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + if _, ok := val.([]interface{}); ok { + return runtime.DeepCopyJSONValue(val).([]interface{}), true + } + return nil, false +} + +// NestedStringMap returns a copy of map[string]string value of a nested field. +// Returns false if value is not found, is not a map[string]interface{} or contains non-string values in the map. +func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool) { + m, ok := nestedMapNoCopy(obj, fields...) + if !ok { + return nil, false + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else { + return nil, false + } + } + return strMap, true +} + +// NestedMap returns a deep copy of map[string]interface{} value of a nested field. +// Returns false if value is not found or is not a map[string]interface{}. +func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { + m, ok := nestedMapNoCopy(obj, fields...) + if !ok { + return nil, false + } + return runtime.DeepCopyJSON(m), true +} + +// nestedMapNoCopy returns a map[string]interface{} value of a nested field. +// Returns false if value is not found or is not a map[string]interface{}. +func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool) { + val, ok := nestedFieldNoCopy(obj, fields...) + if !ok { + return nil, false + } + m, ok := val.(map[string]interface{}) + return m, ok +} + +// SetNestedField sets the value of a nested field to a deep copy of the value provided. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) bool { + return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) 
+} + +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) bool { + m := obj + for _, field := range fields[:len(fields)-1] { + if val, ok := m[field]; ok { + if valMap, ok := val.(map[string]interface{}); ok { + m = valMap + } else { + return false + } + } else { + newVal := make(map[string]interface{}) + m[field] = newVal + m = newVal + } + } + m[fields[len(fields)-1]] = value + return true +} + +// SetNestedStringSlice sets the string slice value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) bool { + m := make([]interface{}, 0, len(value)) // convert []string into []interface{} + for _, v := range value { + m = append(m, v) + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedSlice sets the slice value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) bool { + return SetNestedField(obj, value, fields...) +} + +// SetNestedStringMap sets the map[string]string value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) bool { + m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} + for k, v := range value { + m[k] = v + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedMap sets the map[string]interface{} value of a nested field. +// Returns false if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) bool { + return SetNestedField(obj, value, fields...) +} + +// RemoveNestedField removes the nested field from the obj. +func RemoveNestedField(obj map[string]interface{}, fields ...string) { + m := obj + for _, field := range fields[:len(fields)-1] { + if x, ok := m[field].(map[string]interface{}); ok { + m = x + } else { + return + } + } + delete(m, fields[len(fields)-1]) +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + val, ok := NestedString(obj, fields...) + if !ok { + return "" + } + return val +} + +func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. + var controllerPtr *bool + if controller, ok := NestedBool(v, "controller"); ok { + controllerPtr = &controller + } + var blockOwnerDeletionPtr *bool + if blockOwnerDeletion, ok := NestedBool(v, "blockOwnerDeletion"); ok { + blockOwnerDeletionPtr = &blockOwnerDeletion + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: types.UID(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. 
+var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + listObj := make(map[string]interface{}, len(t.Object)+1) + for k, v := range t.Object { // Make a shallow copy + listObj[k] = v + } + listObj["items"] = items + return json.NewEncoder(w).Encode(listObj) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. + unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. + listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = make([]Unstructured, 0, len(dList.Items)) + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. 
+ if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, *unstruct) + } + return nil +} + +// UnstructuredObjectConverter is an ObjectConverter for use with +// Unstructured objects. Since it has no schema or type information, +// it will only succeed for no-op conversions. This is provided as a +// sane implementation for APIs that require an object converter. +type UnstructuredObjectConverter struct{} + +func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { + unstructIn, ok := in.(*Unstructured) + if !ok { + return fmt.Errorf("input type %T in not valid for unstructured conversion", in) + } + + unstructOut, ok := out.(*Unstructured) + if !ok { + return fmt.Errorf("output type %T in not valid for unstructured conversion", out) + } + + // maybe deep copy the map? It is documented in the + // ObjectConverter interface that this function is not + // guaranteed to not mutate the input. Or maybe set the input + // object to nil. + unstructOut.Object = unstructIn.Object + return nil +} + +func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { + if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { + gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) + if !ok { + // TODO: should this be a typed error? + return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) + } + in.GetObjectKind().SetGroupVersionKind(gvk) + } + return in, nil +} + +func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { + return "", "", errors.New("unstructured cannot convert field labels") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index 7213b7a1..36e769bd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -18,20 +18,13 @@ package unstructured import ( "bytes" - gojson "encoding/json" "errors" "fmt" - "io" - "strings" - - "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" ) @@ -54,39 +47,31 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} -var _ runtime.Unstructured = &UnstructuredList{} -func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } -func (obj *UnstructuredList) GetObjectKind() schema.ObjectKind { return obj } - -func (obj *Unstructured) IsUnstructuredObject() {} -func (obj *UnstructuredList) IsUnstructuredObject() {} +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } func (obj *Unstructured) IsList() bool { - if obj.Object != nil { - _, ok := obj.Object["items"] - return ok - } - return false -} -func (obj *UnstructuredList) IsList() bool { return true } - -func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { - if obj.Object == nil { - return fmt.Errorf("content is not a list") - } field, ok := obj.Object["items"] if !ok { - return 
fmt.Errorf("content is not a list") + return false + } + _, ok = field.([]interface{}) + return ok +} + +func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { + field, ok := obj.Object["items"] + if !ok { + return errors.New("content is not a list") } items, ok := field.([]interface{}) if !ok { - return nil + return fmt.Errorf("content is not a list: %T", field) } for _, item := range items { child, ok := item.(map[string]interface{}) if !ok { - return fmt.Errorf("items member is not an object") + return fmt.Errorf("items member is not an object: %T", child) } if err := fn(&Unstructured{Object: child}); err != nil { return err @@ -95,15 +80,6 @@ func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { return nil } -func (obj *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { - for i := range obj.Items { - if err := fn(&obj.Items[i]); err != nil { - return err - } - } - return nil -} - func (obj *Unstructured) UnstructuredContent() map[string]interface{} { if obj.Object == nil { obj.Object = make(map[string]interface{}) @@ -111,23 +87,8 @@ func (obj *Unstructured) UnstructuredContent() map[string]interface{} { return obj.Object } -// UnstructuredContent returns a map contain an overlay of the Items field onto -// the Object field. Items always overwrites overlay. Changing "items" in the -// returned object will affect items in the underlying Items field, but changing -// the "items" slice itself will have no effect. -// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows -// items to be changed. -func (obj *UnstructuredList) UnstructuredContent() map[string]interface{} { - out := obj.Object - if out == nil { - out = make(map[string]interface{}) - } - items := make([]interface{}, len(obj.Items)) - for i, item := range obj.Items { - items[i] = item.Object - } - out["items"] = items - return out +func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content } // MarshalJSON ensures that the unstructured object produces proper @@ -151,227 +112,61 @@ func (in *Unstructured) DeepCopy() *Unstructured { } out := new(Unstructured) *out = *in - out.Object = unstructured.DeepCopyJSON(in.Object) + out.Object = runtime.DeepCopyJSON(in.Object) return out } -func (in *UnstructuredList) DeepCopy() *UnstructuredList { - if in == nil { - return nil - } - out := new(UnstructuredList) - *out = *in - out.Object = unstructured.DeepCopyJSON(in.Object) - out.Items = make([]Unstructured, len(in.Items)) - for i := range in.Items { - in.Items[i].DeepCopyInto(&out.Items[i]) - } - return out -} - -func getNestedField(obj map[string]interface{}, fields ...string) interface{} { - var val interface{} = obj - for _, field := range fields { - if _, ok := val.(map[string]interface{}); !ok { - return nil - } - val = val.(map[string]interface{})[field] - } - return val -} - -func getNestedString(obj map[string]interface{}, fields ...string) string { - if str, ok := getNestedField(obj, fields...).(string); ok { - return str - } - return "" -} - -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - if str, ok := getNestedField(obj, fields...).(int64); ok { - return str - } - return 0 -} - -func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { - nested := getNestedField(obj, fields...) 
- switch n := nested.(type) { - case int64: - return &n - case *int64: - return n - default: - return nil - } -} - -func getNestedSlice(obj map[string]interface{}, fields ...string) []string { - if m, ok := getNestedField(obj, fields...).([]interface{}); ok { - strSlice := make([]string, 0, len(m)) - for _, v := range m { - if str, ok := v.(string); ok { - strSlice = append(strSlice, str) - } - } - return strSlice - } - return nil -} - -func getNestedMap(obj map[string]interface{}, fields ...string) map[string]string { - if m, ok := getNestedField(obj, fields...).(map[string]interface{}); ok { - strMap := make(map[string]string, len(m)) - for k, v := range m { - if str, ok := v.(string); ok { - strMap[k] = str - } - } - return strMap - } - return nil -} - -func setNestedField(obj map[string]interface{}, value interface{}, fields ...string) { - m := obj - if len(fields) > 1 { - for _, field := range fields[0 : len(fields)-1] { - if _, ok := m[field].(map[string]interface{}); !ok { - m[field] = make(map[string]interface{}) - } - m = m[field].(map[string]interface{}) - } - } - m[fields[len(fields)-1]] = value -} - -func setNestedSlice(obj map[string]interface{}, value []string, fields ...string) { - m := make([]interface{}, 0, len(value)) - for _, v := range value { - m = append(m, v) - } - setNestedField(obj, m, fields...) -} - -func setNestedMap(obj map[string]interface{}, value map[string]string, fields ...string) { - m := make(map[string]interface{}, len(value)) - for k, v := range value { - m[k] = v - } - setNestedField(obj, m, fields...) -} - func (u *Unstructured) setNestedField(value interface{}, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedField(u.Object, value, fields...) + SetNestedField(u.Object, value, fields...) } func (u *Unstructured) setNestedSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedSlice(u.Object, value, fields...) + SetNestedStringSlice(u.Object, value, fields...) } func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } - setNestedMap(u.Object, value, fields...) -} - -func extractOwnerReference(src interface{}) metav1.OwnerReference { - v := src.(map[string]interface{}) - // though this field is a *bool, but when decoded from JSON, it's - // unmarshalled as bool. 
- var controllerPtr *bool - controller, ok := (getNestedField(v, "controller")).(bool) - if !ok { - controllerPtr = nil - } else { - controllerCopy := controller - controllerPtr = &controllerCopy - } - var blockOwnerDeletionPtr *bool - blockOwnerDeletion, ok := (getNestedField(v, "blockOwnerDeletion")).(bool) - if !ok { - blockOwnerDeletionPtr = nil - } else { - blockOwnerDeletionCopy := blockOwnerDeletion - blockOwnerDeletionPtr = &blockOwnerDeletionCopy - } - return metav1.OwnerReference{ - Kind: getNestedString(v, "kind"), - Name: getNestedString(v, "name"), - APIVersion: getNestedString(v, "apiVersion"), - UID: (types.UID)(getNestedString(v, "uid")), - Controller: controllerPtr, - BlockOwnerDeletion: blockOwnerDeletionPtr, - } -} - -func setOwnerReference(src metav1.OwnerReference) map[string]interface{} { - ret := make(map[string]interface{}) - setNestedField(ret, src.Kind, "kind") - setNestedField(ret, src.Name, "name") - setNestedField(ret, src.APIVersion, "apiVersion") - setNestedField(ret, string(src.UID), "uid") - // json.Unmarshal() extracts boolean json fields as bool, not as *bool and hence extractOwnerReference() - // expects bool or a missing field, not *bool. So if pointer is nil, fields are omitted from the ret object. - // If pointer is non-nil, they are set to the referenced value. - if src.Controller != nil { - setNestedField(ret, *src.Controller, "controller") - } - if src.BlockOwnerDeletion != nil { - setNestedField(ret, *src.BlockOwnerDeletion, "blockOwnerDeletion") - } - return ret -} - -func getOwnerReferences(object map[string]interface{}) ([]map[string]interface{}, error) { - field := getNestedField(object, "metadata", "ownerReferences") - if field == nil { - return nil, fmt.Errorf("cannot find field metadata.ownerReferences in %v", object) - } - ownerReferences, ok := field.([]map[string]interface{}) - if ok { - return ownerReferences, nil - } - // TODO: This is hacky... - interfaces, ok := field.([]interface{}) - if !ok { - return nil, fmt.Errorf("expect metadata.ownerReferences to be a slice in %#v", object) - } - ownerReferences = make([]map[string]interface{}, 0, len(interfaces)) - for i := 0; i < len(interfaces); i++ { - r, ok := interfaces[i].(map[string]interface{}) - if !ok { - return nil, fmt.Errorf("expect element metadata.ownerReferences to be a map[string]interface{} in %#v", object) - } - ownerReferences = append(ownerReferences, r) - } - return ownerReferences, nil + SetNestedStringMap(u.Object, value, fields...) 
} func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { - original, err := getOwnerReferences(u.Object) - if err != nil { - glog.V(6).Info(err) + field, ok := nestedFieldNoCopy(u.Object, "metadata", "ownerReferences") + if !ok { + return nil + } + original, ok := field.([]interface{}) + if !ok { return nil } ret := make([]metav1.OwnerReference, 0, len(original)) - for i := 0; i < len(original); i++ { - ret = append(ret, extractOwnerReference(original[i])) + for _, obj := range original { + o, ok := obj.(map[string]interface{}) + if !ok { + // expected map[string]interface{}, got something else + return nil + } + ret = append(ret, extractOwnerReference(o)) } return ret } func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { - var newReferences = make([]map[string]interface{}, 0, len(references)) - for i := 0; i < len(references); i++ { - newReferences = append(newReferences, setOwnerReference(references[i])) + newReferences := make([]interface{}, 0, len(references)) + for _, reference := range references { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err)) + continue + } + newReferences = append(newReferences, out) } u.setNestedField(newReferences, "metadata", "ownerReferences") } @@ -433,7 +228,11 @@ func (u *Unstructured) SetResourceVersion(version string) { } func (u *Unstructured) GetGeneration() int64 { - return getNestedInt64(u.Object, "metadata", "generation") + val, ok := NestedInt64(u.Object, "metadata", "generation") + if !ok { + return 0 + } + return val } func (u *Unstructured) SetGeneration(generation int64) { @@ -464,6 +263,10 @@ func (u *Unstructured) GetCreationTimestamp() metav1.Time { func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { ts, _ := timestamp.MarshalQueryParameter() + if len(ts) == 0 || timestamp.Time.IsZero() { + RemoveNestedField(u.Object, "metadata", "creationTimestamp") + return + } u.setNestedField(ts, "metadata", "creationTimestamp") } @@ -478,7 +281,7 @@ func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { if timestamp == nil { - u.setNestedField(nil, "metadata", "deletionTimestamp") + RemoveNestedField(u.Object, "metadata", "deletionTimestamp") return } ts, _ := timestamp.MarshalQueryParameter() @@ -486,15 +289,24 @@ func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { } func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "deletionGracePeriodSeconds") + val, ok := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") + if !ok { + return nil + } + return &val } func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { - u.setNestedField(deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") + if deletionGracePeriodSeconds == nil { + RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds") + return + } + u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") } func (u *Unstructured) GetLabels() map[string]string { - return getNestedMap(u.Object, "metadata", "labels") + m, _ := NestedStringMap(u.Object, "metadata", "labels") + return m } func (u *Unstructured) SetLabels(labels map[string]string) { @@ -502,7 +314,8 @@ func (u *Unstructured) SetLabels(labels map[string]string) { } func (u *Unstructured) 
GetAnnotations() map[string]string { - return getNestedMap(u.Object, "metadata", "annotations") + m, _ := NestedStringMap(u.Object, "metadata", "annotations") + return m } func (u *Unstructured) SetAnnotations(annotations map[string]string) { @@ -523,41 +336,34 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } -var converter = unstructured.NewConverter(false) - func (u *Unstructured) GetInitializers() *metav1.Initializers { - field := getNestedField(u.Object, "metadata", "initializers") - if field == nil { - return nil - } - obj, ok := field.(map[string]interface{}) + m, ok := nestedMapNoCopy(u.Object, "metadata", "initializers") if !ok { return nil } out := &metav1.Initializers{} - if err := converter.FromUnstructured(obj, out); err != nil { + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + return nil } return out } func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } if initializers == nil { - setNestedField(u.Object, nil, "metadata", "initializers") + RemoveNestedField(u.Object, "metadata", "initializers") return } - out, err := converter.ToUnstructured(initializers) + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) } - setNestedField(u.Object, out, "metadata", "initializers") + u.setNestedField(out, "metadata", "initializers") } func (u *Unstructured) GetFinalizers() []string { - return getNestedSlice(u.Object, "metadata", "finalizers") + val, _ := NestedStringSlice(u.Object, "metadata", "finalizers") + return val } func (u *Unstructured) SetFinalizers(finalizers []string) { @@ -571,272 +377,3 @@ func (u *Unstructured) GetClusterName() string { func (u *Unstructured) SetClusterName(clusterName string) { u.setNestedField(clusterName, "metadata", "clusterName") } - -// UnstructuredList allows lists that do not have Golang structs -// registered to be manipulated generically. This can be used to deal -// with the API lists from a plug-in. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:deepcopy-gen=true -type UnstructuredList struct { - Object map[string]interface{} - - // Items is a list of unstructured objects. - Items []Unstructured `json:"items"` -} - -var _ metav1.ListInterface = &UnstructuredList{} - -// MarshalJSON ensures that the unstructured list object produces proper -// JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - err := UnstructuredJSONScheme.Encode(u, &buf) - return buf.Bytes(), err -} - -// UnmarshalJSON ensures that the unstructured list object properly -// decodes JSON when passed to Go's standard JSON library. -func (u *UnstructuredList) UnmarshalJSON(b []byte) error { - _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) - return err -} - -func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - setNestedField(u.Object, value, fields...) 
-} - -func (u *UnstructuredList) GetAPIVersion() string { - return getNestedString(u.Object, "apiVersion") -} - -func (u *UnstructuredList) SetAPIVersion(version string) { - u.setNestedField(version, "apiVersion") -} - -func (u *UnstructuredList) GetKind() string { - return getNestedString(u.Object, "kind") -} - -func (u *UnstructuredList) SetKind(kind string) { - u.setNestedField(kind, "kind") -} - -func (u *UnstructuredList) GetResourceVersion() string { - return getNestedString(u.Object, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) SetResourceVersion(version string) { - u.setNestedField(version, "metadata", "resourceVersion") -} - -func (u *UnstructuredList) GetSelfLink() string { - return getNestedString(u.Object, "metadata", "selfLink") -} - -func (u *UnstructuredList) SetSelfLink(selfLink string) { - u.setNestedField(selfLink, "metadata", "selfLink") -} - -func (u *UnstructuredList) GetContinue() string { - return getNestedString(u.Object, "metadata", "continue") -} - -func (u *UnstructuredList) SetContinue(c string) { - u.setNestedField(c, "metadata", "continue") -} - -func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { - u.SetAPIVersion(gvk.GroupVersion().String()) - u.SetKind(gvk.Kind) -} - -func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { - gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) - if err != nil { - return schema.GroupVersionKind{} - } - gvk := gv.WithKind(u.GetKind()) - return gvk -} - -// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured -// type, which can be used for generic access to objects without a predefined scheme. -// TODO: move into serializer/json. -var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} - -type unstructuredJSONScheme struct{} - -func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - var err error - if obj != nil { - err = s.decodeInto(data, obj) - } else { - obj, err = s.decode(data) - } - - if err != nil { - return nil, nil, err - } - - gvk := obj.GetObjectKind().GroupVersionKind() - if len(gvk.Kind) == 0 { - return nil, &gvk, runtime.NewMissingKindErr(string(data)) - } - - return obj, &gvk, nil -} - -func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { - switch t := obj.(type) { - case *Unstructured: - return json.NewEncoder(w).Encode(t.Object) - case *UnstructuredList: - items := make([]map[string]interface{}, 0, len(t.Items)) - for _, i := range t.Items { - items = append(items, i.Object) - } - listObj := make(map[string]interface{}, len(t.Object)+1) - for k, v := range t.Object { // Make a shallow copy - listObj[k] = v - } - listObj["items"] = items - return json.NewEncoder(w).Encode(listObj) - case *runtime.Unknown: - // TODO: Unstructured needs to deal with ContentType. - _, err := w.Write(t.Raw) - return err - default: - return json.NewEncoder(w).Encode(t) - } -} - -func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { - type detector struct { - Items gojson.RawMessage - } - var det detector - if err := json.Unmarshal(data, &det); err != nil { - return nil, err - } - - if det.Items != nil { - list := &UnstructuredList{} - err := s.decodeToList(data, list) - return list, err - } - - // No Items field, so it wasn't a list. 
- unstruct := &Unstructured{} - err := s.decodeToUnstructured(data, unstruct) - return unstruct, err -} - -func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { - switch x := obj.(type) { - case *Unstructured: - return s.decodeToUnstructured(data, x) - case *UnstructuredList: - return s.decodeToList(data, x) - case *runtime.VersionedObjects: - o, err := s.decode(data) - if err == nil { - x.Objects = []runtime.Object{o} - } - return err - default: - return json.Unmarshal(data, x) - } -} - -func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { - m := make(map[string]interface{}) - if err := json.Unmarshal(data, &m); err != nil { - return err - } - - unstruct.Object = m - - return nil -} - -func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { - type decodeList struct { - Items []gojson.RawMessage - } - - var dList decodeList - if err := json.Unmarshal(data, &dList); err != nil { - return err - } - - if err := json.Unmarshal(data, &list.Object); err != nil { - return err - } - - // For typed lists, e.g., a PodList, API server doesn't set each item's - // APIVersion and Kind. We need to set it. - listAPIVersion := list.GetAPIVersion() - listKind := list.GetKind() - itemKind := strings.TrimSuffix(listKind, "List") - - delete(list.Object, "items") - list.Items = nil - for _, i := range dList.Items { - unstruct := &Unstructured{} - if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { - return err - } - // This is hacky. Set the item's Kind and APIVersion to those inferred - // from the List. - if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { - unstruct.SetKind(itemKind) - unstruct.SetAPIVersion(listAPIVersion) - } - list.Items = append(list.Items, *unstruct) - } - return nil -} - -// UnstructuredObjectConverter is an ObjectConverter for use with -// Unstructured objects. Since it has no schema or type information, -// it will only succeed for no-op conversions. This is provided as a -// sane implementation for APIs that require an object converter. -type UnstructuredObjectConverter struct{} - -func (UnstructuredObjectConverter) Convert(in, out, context interface{}) error { - unstructIn, ok := in.(*Unstructured) - if !ok { - return fmt.Errorf("input type %T in not valid for unstructured conversion", in) - } - - unstructOut, ok := out.(*Unstructured) - if !ok { - return fmt.Errorf("output type %T in not valid for unstructured conversion", out) - } - - // maybe deep copy the map? It is documented in the - // ObjectConverter interface that this function is not - // guaranteeed to not mutate the input. Or maybe set the input - // object to nil. - unstructOut.Object = unstructIn.Object - return nil -} - -func (UnstructuredObjectConverter) ConvertToVersion(in runtime.Object, target runtime.GroupVersioner) (runtime.Object, error) { - if kind := in.GetObjectKind().GroupVersionKind(); !kind.Empty() { - gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{kind}) - if !ok { - // TODO: should this be a typed error? 
- return nil, fmt.Errorf("%v is unstructured and is not suitable for converting to %q", kind, target) - } - in.GetObjectKind().SetGroupVersionKind(gvk) - } - return in, nil -} - -func (UnstructuredObjectConverter) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { - return "", "", errors.New("unstructured cannot convert field labels") -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go new file mode 100644 index 00000000..57d78a09 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -0,0 +1,189 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.Unstructured = &UnstructuredList{} +var _ metav1.ListInterface = &UnstructuredList{} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []Unstructured `json:"items"` +} + +func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u } + +func (u *UnstructuredList) IsList() bool { return true } + +func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&u.Items[i]); err != nil { + return err + } + } + return nil +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. Changing "items" in the +// returned object will affect items in the underlying Items field, but changing +// the "items" slice itself will have no effect. +// TODO: expose SetUnstructuredContent on runtime.Unstructured that allows +// items to be changed. +func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := u.Object + if out == nil { + out = make(map[string]interface{}) + } + items := make([]interface{}, len(u.Items)) + for i, item := range u.Items { + items[i] = item.Object + } + out["items"] = items + return out +} + +// SetUnstructuredContent obeys the conventions of List and keeps Items and the items +// array in sync. If items is not an array of objects in the incoming map, then any +// mismatched item will be removed. 
+func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content + if content == nil { + obj.Items = nil + return + } + items, ok := obj.Object["items"].([]interface{}) + if !ok || items == nil { + items = []interface{}{} + } + unstructuredItems := make([]Unstructured, 0, len(items)) + newItems := make([]interface{}, 0, len(items)) + for _, item := range items { + o, ok := item.(map[string]interface{}) + if !ok { + continue + } + unstructuredItems = append(unstructuredItems, Unstructured{Object: o}) + newItems = append(newItems, o) + } + obj.Items = unstructuredItems + obj.Object["items"] = newItems +} + +func (u *UnstructuredList) DeepCopy() *UnstructuredList { + if u == nil { + return nil + } + out := new(UnstructuredList) + *out = *u + out.Object = runtime.DeepCopyJSON(u.Object) + out.Items = make([]Unstructured, len(u.Items)) + for i := range u.Items { + u.Items[i].DeepCopyInto(&out.Items[i]) + } + return out +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *UnstructuredList) SetContinue(c string) { + u.setNestedField(c, "metadata", "continue") +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedField(u.Object, value, fields...) 
+} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go index 248acf98..8f6a17bf 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -21,27 +21,9 @@ limitations under the License. package unstructured import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Unstructured).DeepCopyInto(out.(*Unstructured)) - return nil - }, InType: reflect.TypeOf(&Unstructured{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UnstructuredList).DeepCopyInto(out.(*UnstructuredList)) - return nil - }, InType: reflect.TypeOf(&UnstructuredList{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Unstructured) DeepCopyInto(out *Unstructured) { clone := in.DeepCopy() diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index c73e777b..ed458550 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -21,164 +21,10 @@ limitations under the License. package v1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIGroup).DeepCopyInto(out.(*APIGroup)) - return nil - }, InType: reflect.TypeOf(&APIGroup{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIGroupList).DeepCopyInto(out.(*APIGroupList)) - return nil - }, InType: reflect.TypeOf(&APIGroupList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIResource).DeepCopyInto(out.(*APIResource)) - return nil - }, InType: reflect.TypeOf(&APIResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIResourceList).DeepCopyInto(out.(*APIResourceList)) - return nil - }, InType: reflect.TypeOf(&APIResourceList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersions).DeepCopyInto(out.(*APIVersions)) - return nil - }, InType: reflect.TypeOf(&APIVersions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Duration).DeepCopyInto(out.(*Duration)) - return nil - }, InType: reflect.TypeOf(&Duration{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExportOptions).DeepCopyInto(out.(*ExportOptions)) - return nil - }, InType: reflect.TypeOf(&ExportOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GetOptions).DeepCopyInto(out.(*GetOptions)) - return nil - }, InType: reflect.TypeOf(&GetOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupKind).DeepCopyInto(out.(*GroupKind)) - return nil - }, InType: reflect.TypeOf(&GroupKind{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupResource).DeepCopyInto(out.(*GroupResource)) - return nil - }, InType: reflect.TypeOf(&GroupResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersion).DeepCopyInto(out.(*GroupVersion)) - return nil - }, InType: reflect.TypeOf(&GroupVersion{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionForDiscovery).DeepCopyInto(out.(*GroupVersionForDiscovery)) - return nil - }, InType: reflect.TypeOf(&GroupVersionForDiscovery{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionKind).DeepCopyInto(out.(*GroupVersionKind)) - return nil - }, InType: reflect.TypeOf(&GroupVersionKind{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupVersionResource).DeepCopyInto(out.(*GroupVersionResource)) - return nil - }, InType: reflect.TypeOf(&GroupVersionResource{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializer).DeepCopyInto(out.(*Initializer)) - return nil - }, InType: reflect.TypeOf(&Initializer{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Initializers).DeepCopyInto(out.(*Initializers)) - return nil - }, InType: reflect.TypeOf(&Initializers{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*InternalEvent).DeepCopyInto(out.(*InternalEvent)) - return nil - }, InType: 
reflect.TypeOf(&InternalEvent{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelSelector).DeepCopyInto(out.(*LabelSelector)) - return nil - }, InType: reflect.TypeOf(&LabelSelector{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LabelSelectorRequirement).DeepCopyInto(out.(*LabelSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&LabelSelectorRequirement{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListMeta).DeepCopyInto(out.(*ListMeta)) - return nil - }, InType: reflect.TypeOf(&ListMeta{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*MicroTime).DeepCopyInto(out.(*MicroTime)) - return nil - }, InType: reflect.TypeOf(&MicroTime{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*OwnerReference).DeepCopyInto(out.(*OwnerReference)) - return nil - }, InType: reflect.TypeOf(&OwnerReference{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Patch).DeepCopyInto(out.(*Patch)) - return nil - }, InType: reflect.TypeOf(&Patch{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RootPaths).DeepCopyInto(out.(*RootPaths)) - return nil - }, InType: reflect.TypeOf(&RootPaths{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServerAddressByClientCIDR).DeepCopyInto(out.(*ServerAddressByClientCIDR)) - return nil - }, InType: reflect.TypeOf(&ServerAddressByClientCIDR{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Status).DeepCopyInto(out.(*Status)) - return nil - }, InType: reflect.TypeOf(&Status{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatusCause).DeepCopyInto(out.(*StatusCause)) - return nil - }, InType: reflect.TypeOf(&StatusCause{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StatusDetails).DeepCopyInto(out.(*StatusDetails)) - return nil - }, InType: reflect.TypeOf(&StatusDetails{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Time).DeepCopyInto(out.(*Time)) - return nil - }, InType: reflect.TypeOf(&Time{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Timestamp).DeepCopyInto(out.(*Timestamp)) - return nil - }, InType: reflect.TypeOf(&Timestamp{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WatchEvent).DeepCopyInto(out.(*WatchEvent)) - return nil - }, InType: reflect.TypeOf(&WatchEvent{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *APIGroup) DeepCopyInto(out *APIGroup) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go index 043456cd..4ae545d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1alpha1/zz_generated.deepcopy.go @@ -21,47 +21,9 @@ limitations under the License. package v1alpha1 import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PartialObjectMetadata).DeepCopyInto(out.(*PartialObjectMetadata)) - return nil - }, InType: reflect.TypeOf(&PartialObjectMetadata{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PartialObjectMetadataList).DeepCopyInto(out.(*PartialObjectMetadataList)) - return nil - }, InType: reflect.TypeOf(&PartialObjectMetadataList{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Table).DeepCopyInto(out.(*Table)) - return nil - }, InType: reflect.TypeOf(&Table{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableColumnDefinition).DeepCopyInto(out.(*TableColumnDefinition)) - return nil - }, InType: reflect.TypeOf(&TableColumnDefinition{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableOptions).DeepCopyInto(out.(*TableOptions)) - return nil - }, InType: reflect.TypeOf(&TableOptions{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableRow).DeepCopyInto(out.(*TableRow)) - return nil - }, InType: reflect.TypeOf(&TableRow{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TableRowCondition).DeepCopyInto(out.(*TableRowCondition)) - return nil - }, InType: reflect.TypeOf(&TableRowCondition{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go b/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go deleted file mode 100644 index c5dec1f3..00000000 --- a/vendor/k8s.io/apimachinery/pkg/conversion/cloner.go +++ /dev/null @@ -1,249 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package conversion - -import ( - "fmt" - "reflect" -) - -// Cloner knows how to copy one type to another. 
-type Cloner struct { - // Map from the type to a function which can do the deep copy. - deepCopyFuncs map[reflect.Type]reflect.Value - generatedDeepCopyFuncs map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error -} - -// NewCloner creates a new Cloner object. -func NewCloner() *Cloner { - c := &Cloner{ - deepCopyFuncs: map[reflect.Type]reflect.Value{}, - generatedDeepCopyFuncs: map[reflect.Type]func(in interface{}, out interface{}, c *Cloner) error{}, - } - if err := c.RegisterDeepCopyFunc(byteSliceDeepCopy); err != nil { - // If one of the deep-copy functions is malformed, detect it immediately. - panic(err) - } - return c -} - -// Prevent recursing into every byte... -func byteSliceDeepCopy(in *[]byte, out *[]byte, c *Cloner) error { - if *in != nil { - *out = make([]byte, len(*in)) - copy(*out, *in) - } else { - *out = nil - } - return nil -} - -// Verifies whether a deep-copy function has a correct signature. -func verifyDeepCopyFunctionSignature(ft reflect.Type) error { - if ft.Kind() != reflect.Func { - return fmt.Errorf("expected func, got: %v", ft) - } - if ft.NumIn() != 3 { - return fmt.Errorf("expected three 'in' params, got %v", ft) - } - if ft.NumOut() != 1 { - return fmt.Errorf("expected one 'out' param, got %v", ft) - } - if ft.In(0).Kind() != reflect.Ptr { - return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) - } - if ft.In(1) != ft.In(0) { - return fmt.Errorf("expected 'in' param 0 the same as param 1, got: %v", ft) - } - var forClonerType Cloner - if expected := reflect.TypeOf(&forClonerType); ft.In(2) != expected { - return fmt.Errorf("expected '%v' arg for 'in' param 2, got: '%v'", expected, ft.In(2)) - } - var forErrorType error - // This convolution is necessary, otherwise TypeOf picks up on the fact - // that forErrorType is nil - errorType := reflect.TypeOf(&forErrorType).Elem() - if ft.Out(0) != errorType { - return fmt.Errorf("expected error return, got: %v", ft) - } - return nil -} - -// RegisterGeneratedDeepCopyFunc registers a copying func with the Cloner. -// deepCopyFunc must take three parameters: a type input, a pointer to a -// type output, and a pointer to Cloner. It should return an error. -// -// Example: -// c.RegisterGeneratedDeepCopyFunc( -// func(in Pod, out *Pod, c *Cloner) error { -// // deep copy logic... -// return nil -// }) -func (c *Cloner) RegisterDeepCopyFunc(deepCopyFunc interface{}) error { - fv := reflect.ValueOf(deepCopyFunc) - ft := fv.Type() - if err := verifyDeepCopyFunctionSignature(ft); err != nil { - return err - } - c.deepCopyFuncs[ft.In(0)] = fv - return nil -} - -// GeneratedDeepCopyFunc bundles an untyped generated deep-copy function of a type -// with a reflection type object used as a key to lookup the deep-copy function. -type GeneratedDeepCopyFunc struct { - Fn func(in interface{}, out interface{}, c *Cloner) error - InType reflect.Type -} - -// Similar to RegisterDeepCopyFunc, but registers deep copy function that were -// automatically generated. -func (c *Cloner) RegisterGeneratedDeepCopyFunc(fn GeneratedDeepCopyFunc) error { - c.generatedDeepCopyFuncs[fn.InType] = fn.Fn - return nil -} - -// DeepCopy will perform a deep copy of a given object. -func (c *Cloner) DeepCopy(in interface{}) (interface{}, error) { - // Can be invalid if we run DeepCopy(X) where X is a nil interface type. - // For example, we get an invalid value when someone tries to deep-copy - // a nil labels.Selector. - // This does not occur if X is nil and is a pointer to a concrete type. 
- if in == nil { - return nil, nil - } - inValue := reflect.ValueOf(in) - outValue, err := c.deepCopy(inValue) - if err != nil { - return nil, err - } - return outValue.Interface(), nil -} - -func (c *Cloner) deepCopy(src reflect.Value) (reflect.Value, error) { - inType := src.Type() - - switch src.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - if src.IsNil() { - return src, nil - } - } - - if fv, ok := c.deepCopyFuncs[inType]; ok { - return c.customDeepCopy(src, fv) - } - if fv, ok := c.generatedDeepCopyFuncs[inType]; ok { - var outValue reflect.Value - outValue = reflect.New(inType.Elem()) - err := fv(src.Interface(), outValue.Interface(), c) - return outValue, err - } - return c.defaultDeepCopy(src) -} - -func (c *Cloner) customDeepCopy(src, fv reflect.Value) (reflect.Value, error) { - outValue := reflect.New(src.Type().Elem()) - args := []reflect.Value{src, outValue, reflect.ValueOf(c)} - result := fv.Call(args)[0].Interface() - // This convolution is necessary because nil interfaces won't convert - // to error. - if result == nil { - return outValue, nil - } - return outValue, result.(error) -} - -func (c *Cloner) defaultDeepCopy(src reflect.Value) (reflect.Value, error) { - switch src.Kind() { - case reflect.Chan, reflect.Func, reflect.UnsafePointer, reflect.Uintptr: - return src, fmt.Errorf("cannot deep copy kind: %s", src.Kind()) - case reflect.Array: - dst := reflect.New(src.Type()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst.Elem().Index(i).Set(copyVal) - } - return dst.Elem(), nil - case reflect.Interface: - if src.IsNil() { - return src, nil - } - return c.deepCopy(src.Elem()) - case reflect.Map: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeMap(src.Type()) - for _, k := range src.MapKeys() { - copyVal, err := c.deepCopy(src.MapIndex(k)) - if err != nil { - return src, err - } - dst.SetMapIndex(k, copyVal) - } - return dst, nil - case reflect.Ptr: - if src.IsNil() { - return src, nil - } - dst := reflect.New(src.Type().Elem()) - copyVal, err := c.deepCopy(src.Elem()) - if err != nil { - return src, err - } - dst.Elem().Set(copyVal) - return dst, nil - case reflect.Slice: - if src.IsNil() { - return src, nil - } - dst := reflect.MakeSlice(src.Type(), 0, src.Len()) - for i := 0; i < src.Len(); i++ { - copyVal, err := c.deepCopy(src.Index(i)) - if err != nil { - return src, err - } - dst = reflect.Append(dst, copyVal) - } - return dst, nil - case reflect.Struct: - dst := reflect.New(src.Type()) - for i := 0; i < src.NumField(); i++ { - if !dst.Elem().Field(i).CanSet() { - // Can't set private fields. At this point, the - // best we can do is a shallow copy. For - // example, time.Time is a value type with - // private members that can be shallow copied. - return src, nil - } - copyVal, err := c.deepCopy(src.Field(i)) - if err != nil { - return src, err - } - dst.Elem().Field(i).Set(copyVal) - } - return dst.Elem(), nil - - default: - // Value types like numbers, booleans, and strings. - return src, nil - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go index 80ba3fb7..823ef32a 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. 
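The reflection-based Cloner registry deleted above is superseded by statically generated `DeepCopyInto`/`DeepCopy` methods on each type. Below is a minimal sketch of that generated pattern; the `Widget` type is hypothetical and only illustrates the convention the `zz_generated.deepcopy.go` files follow, it is not part of this patch.

```
// Sketch of the statically generated deep-copy convention (hypothetical type).
package main

import "fmt"

type Widget struct {
	Name   string
	Labels map[string]string
	Ports  []int
}

// DeepCopyInto copies the receiver into out. in must be non-nil.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
	if in.Ports != nil {
		out.Ports = make([]int, len(in.Ports))
		copy(out.Ports, in.Ports)
	}
}

// DeepCopy returns a new, deeply copied Widget.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	a := &Widget{Name: "a", Labels: map[string]string{"k": "v"}, Ports: []int{80}}
	b := a.DeepCopy()
	b.Labels["k"] = "changed"
	fmt.Println(a.Labels["k"], b.Labels["k"]) // v changed
}
```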
package labels -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Requirement).DeepCopyInto(out.(*Requirement)) - return nil - }, InType: reflect.TypeOf(&Requirement{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Requirement) DeepCopyInto(out *Requirement) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index d9748f06..5b3080aa 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -139,6 +139,7 @@ func NewParameterCodec(scheme *Scheme) ParameterCodec { typer: scheme, convertor: scheme, creator: scheme, + defaulter: scheme, } } @@ -147,6 +148,7 @@ type parameterCodec struct { typer ObjectTyper convertor ObjectConvertor creator ObjectCreater + defaulter ObjectDefaulter } var _ ParameterCodec = ¶meterCodec{} @@ -163,9 +165,17 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro } for i := range targetGVKs { if targetGVKs[i].GroupVersion() == from { - return c.convertor.Convert(¶meters, into, nil) + if err := c.convertor.Convert(¶meters, into, nil); err != nil { + return err + } + // in the case where we going into the same object we're receiving, default on the outbound object + if c.defaulter != nil { + c.defaulter.Default(into) + } + return nil } } + input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind)) if err != nil { return err @@ -173,6 +183,10 @@ func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.Gro if err := c.convertor.Convert(¶meters, input, nil); err != nil { return err } + // if we have defaulter, default the input before converting to output + if c.defaulter != nil { + c.defaulter.Default(input) + } return c.convertor.Convert(input, into, nil) } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go similarity index 90% rename from vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go rename to vendor/k8s.io/apimachinery/pkg/runtime/converter.go index d0e625d2..f6f7c10d 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/unstructured/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package unstructured +package runtime import ( "bytes" @@ -27,19 +27,18 @@ import ( "strings" "sync" "sync/atomic" + "time" - apiequality "k8s.io/apimachinery/pkg/api/equality" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/diff" + "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "github.com/golang/glog" ) -// Converter is an interface for converting between interface{} +// UnstructuredConverter is an interface for converting between interface{} // and map[string]interface representation. 
-type Converter interface { +type UnstructuredConverter interface { ToUnstructured(obj interface{}) (map[string]interface{}, error) FromUnstructured(u map[string]interface{}, obj interface{}) error } @@ -78,7 +77,16 @@ var ( float64Type = reflect.TypeOf(float64(0)) boolType = reflect.TypeOf(bool(false)) fieldCache = newFieldsCache() - DefaultConverter = NewConverter(parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR"))) + + // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. + DefaultUnstructuredConverter = &unstructuredConverter{ + mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")), + comparison: conversion.EqualitiesOrDie( + func(a, b time.Time) bool { + return a.UTC() == b.UTC() + }, + ), + } ) func parseBool(key string) bool { @@ -92,24 +100,30 @@ func parseBool(key string) bool { return value } -// ConverterImpl knows how to convert between interface{} and +// unstructuredConverter knows how to convert between interface{} and // Unstructured in both ways. -type converterImpl struct { +type unstructuredConverter struct { // If true, we will be additionally running conversion via json // to ensure that the result is true. // This is supposed to be set only in tests. mismatchDetection bool + // comparison is the default test logic used to compare + comparison conversion.Equalities } -func NewConverter(mismatchDetection bool) Converter { - return &converterImpl{ - mismatchDetection: mismatchDetection, +// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them +// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external +// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. +func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return &unstructuredConverter{ + mismatchDetection: true, + comparison: comparison, } } // FromUnstructured converts an object from map[string]interface{} representation into a concrete type. // It uses encoding/json/Unmarshaler if object implements it or reflection if not. -func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface{}) error { +func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) if t.Kind() != reflect.Ptr || value.IsNil() { @@ -122,8 +136,8 @@ func (c *converterImpl) FromUnstructured(u map[string]interface{}, obj interface if (err != nil) != (newErr != nil) { glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } - if err == nil && !apiequality.Semantic.DeepEqual(obj, newObj) { - glog.Fatalf("FromUnstructured mismatch for %#v, diff: %v", obj, diff.ObjectReflectDiff(obj, newObj)) + if err == nil && !c.comparison.DeepEqual(obj, newObj) { + glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -393,11 +407,12 @@ func interfaceFromUnstructured(sv, dv reflect.Value) error { // ToUnstructured converts an object into map[string]interface{} representation. // It uses encoding/json/Marshaler if object implements it or reflection if not. 
-func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, error) { +func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) { var u map[string]interface{} var err error - if unstr, ok := obj.(runtime.Unstructured); ok { - u = DeepCopyJSON(unstr.UnstructuredContent()) + if unstr, ok := obj.(Unstructured); ok { + // UnstructuredContent() mutates the object so we need to make a copy first + u = unstr.DeepCopyObject().(Unstructured).UnstructuredContent() } else { t := reflect.TypeOf(obj) value := reflect.ValueOf(obj) @@ -413,8 +428,8 @@ func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, if (err != nil) != (newErr != nil) { glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } - if err == nil && !apiequality.Semantic.DeepEqual(u, newUnstr) { - glog.Fatalf("ToUnstructured mismatch for %#v, diff: %v", u, diff.ObjectReflectDiff(u, newUnstr)) + if err == nil && !c.comparison.DeepEqual(u, newUnstr) { + glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { @@ -426,21 +441,23 @@ func (c *converterImpl) ToUnstructured(obj interface{}) (map[string]interface{}, // DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains // types produced by json.Unmarshal(). func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { - return deepCopyJSON(x).(map[string]interface{}) + return DeepCopyJSONValue(x).(map[string]interface{}) } -func deepCopyJSON(x interface{}) interface{} { +// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal(). +func DeepCopyJSONValue(x interface{}) interface{} { switch x := x.(type) { case map[string]interface{}: clone := make(map[string]interface{}, len(x)) for k, v := range x { - clone[k] = deepCopyJSON(v) + clone[k] = DeepCopyJSONValue(v) } return clone case []interface{}: clone := make([]interface{}, len(x)) for i, v := range x { - clone[i] = deepCopyJSON(v) + clone[i] = DeepCopyJSONValue(v) } return clone case string, int64, bool, float64, nil, encodingjson.Number: diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index 21a35570..86b24840 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -94,8 +94,6 @@ type missingVersionErr struct { data string } -// IsMissingVersion returns true if the error indicates that the provided object -// is missing a 'Version' field. func NewMissingVersionErr(data string) error { return &missingVersionErr{data} } @@ -104,6 +102,8 @@ func (k *missingVersionErr) Error() string { return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) } +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. func IsMissingVersion(err error) bool { if err == nil { return false diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index c90eef5a..9d00f165 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -233,13 +233,13 @@ type Object interface { // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. 
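For callers, the renamed converter is used roughly as sketched below. This is a usage sketch rather than part of the patch: it assumes a build that vendors this apimachinery revision, and the `Widget` type is hypothetical. `DefaultUnstructuredConverter`, `FromUnstructured`, and `ToUnstructured` are the symbols introduced in the hunk above.

```
// Usage sketch for the runtime.DefaultUnstructuredConverter introduced above.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

type Widget struct {
	Name     string `json:"name"`
	Replicas int64  `json:"replicas"`
}

func main() {
	u := map[string]interface{}{"name": "demo", "replicas": int64(3)}

	// Unstructured map -> typed struct.
	var w Widget
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, &w); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", w)

	// Typed struct -> unstructured map.
	back, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&w)
	if err != nil {
		panic(err)
	}
	fmt.Println(back["name"], back["replicas"])
}
```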
type Unstructured interface { - // IsUnstructuredObject is a marker interface to allow objects that can be serialized but not introspected - // to bypass conversion. - IsUnstructuredObject() + Object // UnstructuredContent returns a non-nil, mutable map of the contents of this object. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. UnstructuredContent() map[string]interface{} + // SetUnstructuredContent updates the object content to match the provided map. + SetUnstructuredContent(map[string]interface{}) // IsList returns true if this type is a list or matches the list convention - has an array called "items". IsList() bool // EachListItem should pass a single item out of the list as an Object to the provided function. Any diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index c597fcf9..08b75538 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -68,10 +68,6 @@ type Scheme struct { // converter stores all registered conversion functions. It also has // default coverting behavior. converter *conversion.Converter - - // cloner stores all registered copy functions. It also has default - // deep copy behavior. - cloner *conversion.Cloner } // Function to convert a field selector to internal representation. @@ -80,11 +76,10 @@ type FieldLabelConversionFunc func(label, value string) (internalLabel, internal // NewScheme creates a new Scheme. This scheme is pluggable by default. func NewScheme() *Scheme { s := &Scheme{ - gvkToType: map[schema.GroupVersionKind]reflect.Type{}, - typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, - unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, - unversionedKinds: map[string]reflect.Type{}, - cloner: conversion.NewCloner(), + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, fieldLabelConversionFuncs: map[string]map[string]FieldLabelConversionFunc{}, defaulterFuncs: map[reflect.Type]func(interface{}){}, } @@ -222,19 +217,22 @@ func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { return s.gvkToType } -// ObjectKind returns the group,version,kind of the go object and true if this object -// is considered unversioned, or an error if it's not a pointer or is unregistered. -func (s *Scheme) ObjectKind(obj Object) (schema.GroupVersionKind, bool, error) { - gvks, unversionedType, err := s.ObjectKinds(obj) - if err != nil { - return schema.GroupVersionKind{}, false, err - } - return gvks[0], unversionedType, nil -} - // ObjectKinds returns all possible group,version,kind of the go object, true if the // object is considered unversioned, or an error if it's not a pointer or is unregistered. 
func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + // Unstructured objects are always considered to have their declared GVK + if _, ok := obj.(Unstructured); ok { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + v, err := conversion.EnforcePtr(obj) if err != nil { return nil, false, err @@ -343,7 +341,7 @@ func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { return nil } -// Similar to AddConversionFuncs, but registers conversion functions that were +// AddGeneratedConversionFuncs registers conversion functions that were // automatically generated. func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) error { for _, f := range conversionFuncs { @@ -354,29 +352,6 @@ func (s *Scheme) AddGeneratedConversionFuncs(conversionFuncs ...interface{}) err return nil } -// AddDeepCopyFuncs adds a function to the list of deep-copy functions. -// For the expected format of deep-copy function, see the comment for -// Copier.RegisterDeepCopyFunction. -func (s *Scheme) AddDeepCopyFuncs(deepCopyFuncs ...interface{}) error { - for _, f := range deepCopyFuncs { - if err := s.cloner.RegisterDeepCopyFunc(f); err != nil { - return err - } - } - return nil -} - -// Similar to AddDeepCopyFuncs, but registers deep-copy functions that were -// automatically generated. -func (s *Scheme) AddGeneratedDeepCopyFuncs(deepCopyFuncs ...conversion.GeneratedDeepCopyFunc) error { - for _, fn := range deepCopyFuncs { - if err := s.cloner.RegisterGeneratedDeepCopyFunc(fn); err != nil { - return err - } - } - return nil -} - // AddFieldLabelConversionFunc adds a conversion function to convert field selectors // of the given kind from the given version to internal version representation. func (s *Scheme) AddFieldLabelConversionFunc(version, kind string, conversionFunc FieldLabelConversionFunc) error { @@ -424,10 +399,68 @@ func (s *Scheme) Default(src Object) { // testing of conversion functions. Returns an error if the conversion isn't // possible. You can call this with types that haven't been registered (for example, // a to test conversion of types that are nested within registered types). The -// context interface is passed to the convertor. -// TODO: identify whether context should be hidden, or behind a formal context/scope -// interface +// context interface is passed to the convertor. Convert also supports Unstructured +// types and will convert them intelligently. func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + unstructuredIn, okIn := in.(Unstructured) + unstructuredOut, okOut := out.(Unstructured) + switch { + case okIn && okOut: + // converting unstructured input to an unstructured output is a straight copy - unstructured + // is a "smart holder" and the contents are passed by reference between the two objects + unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent()) + return nil + + case okOut: + // if the output is an unstructured object, use the standard Go type to unstructured + // conversion. The object must not be internal. 
+ obj, ok := in.(Object) + if !ok { + return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in) + } + gvks, unversioned, err := s.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + + // if no conversion is necessary, convert immediately + if unversioned || gvk.Version != APIVersionInternal { + content, err := DefaultUnstructuredConverter.ToUnstructured(in) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + } + + // attempt to convert the object to an external version first. + target, ok := context.(GroupVersioner) + if !ok { + return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in) + } + // Convert is implicitly unsafe, so we don't need to perform a safe conversion + versioned, err := s.UnsafeConvertToVersion(obj, target) + if err != nil { + return err + } + content, err := DefaultUnstructuredConverter.ToUnstructured(versioned) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + + case okIn: + // converting an unstructured object to any type is modeled by first converting + // the input to a versioned type, then running standard conversions + typed, err := s.unstructuredToTyped(unstructuredIn) + if err != nil { + return err + } + in = typed + } + flags, meta := s.generateConvertMeta(in) meta.Context = context if flags == 0 { @@ -436,8 +469,8 @@ func (s *Scheme) Convert(in, out interface{}, context interface{}) error { return s.converter.Convert(in, out, flags, meta) } -// Converts the given field label and value for an kind field selector from -// versioned representation to an unversioned one. +// ConvertFieldLabel alters the given field label and value for an kind field selector from +// versioned representation to an unversioned one or returns an error. func (s *Scheme) ConvertFieldLabel(version, kind, label, value string) (string, string, error) { if s.fieldLabelConversionFuncs[version] == nil { return DefaultMetaV1FieldSelectorConversion(label, value) @@ -467,15 +500,30 @@ func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Objec // convertToVersion handles conversion with an optional copy. func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { - // determine the incoming kinds with as few allocations as possible. - t := reflect.TypeOf(in) - if t.Kind() != reflect.Ptr { - return nil, fmt.Errorf("only pointer types may be converted: %v", t) - } - t = t.Elem() - if t.Kind() != reflect.Struct { - return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + var t reflect.Type + + if u, ok := in.(Unstructured); ok { + typed, err := s.unstructuredToTyped(u) + if err != nil { + return nil, err + } + + in = typed + // unstructuredToTyped returns an Object, which must be a pointer to a struct. + t = reflect.TypeOf(in).Elem() + + } else { + // determine the incoming kinds with as few allocations as possible. 
+ t = reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } } + kinds, ok := s.typeToGVK[t] if !ok || len(kinds) == 0 { return nil, NewNotRegisteredErrForType(t) @@ -491,7 +539,6 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( } return copyAndSetTargetKind(copy, in, unversionedKind) } - return nil, NewNotRegisteredErrForTarget(t, target) } @@ -529,6 +576,25 @@ func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) ( return out, nil } +// unstructuredToTyped attempts to transform an unstructured object to a typed +// object if possible. It will return an error if conversion is not possible, or the versioned +// Go form of the object. Note that this conversion will lose fields. +func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { + // the type must be something we recognize + gvks, _, err := s.ObjectKinds(in) + if err != nil { + return nil, err + } + typed, err := s.New(gvks[0]) + if err != nil { + return nil, err + } + if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil { + return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err) + } + return typed, nil +} + // generateConvertMeta constructs the meta value we pass to Convert. func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { return s.converter.DefaultMeta(reflect.TypeOf(in)) diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index ce3d77c2..8a217f32 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -150,9 +150,10 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i } if into != nil { + _, isUnstructured := into.(runtime.Unstructured) types, _, err := s.typer.ObjectKinds(into) switch { - case runtime.IsNotRegisteredError(err): + case runtime.IsNotRegisteredError(err), isUnstructured: if err := jsoniter.ConfigFastest.Unmarshal(data, into); err != nil { return nil, actual, err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index d347461a..929c67a9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -20,31 +20,6 @@ limitations under the License. package runtime -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RawExtension).DeepCopyInto(out.(*RawExtension)) - return nil - }, InType: reflect.TypeOf(&RawExtension{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Unknown).DeepCopyInto(out.(*Unknown)) - return nil - }, InType: reflect.TypeOf(&Unknown{})}, - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VersionedObjects).DeepCopyInto(out.(*VersionedObjects)) - return nil - }, InType: reflect.TypeOf(&VersionedObjects{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RawExtension) DeepCopyInto(out *RawExtension) { *out = *in diff --git a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go index ac3c1e8c..16501d5a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/mergepatch/errors.go @@ -29,6 +29,7 @@ var ( ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys") ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list") ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list") + ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported") ) func ErrNoMergeKey(m map[string]interface{}, k string) error { diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index b544a60a..bc2a531b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -276,17 +276,7 @@ func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error } return func(req *http.Request) (*url.URL, error) { - host := req.URL.Host - // for some urls, the Host is already the host, not the host:port - if net.ParseIP(host) == nil { - var err error - host, _, err = net.SplitHostPort(req.URL.Host) - if err != nil { - return delegate(req) - } - } - - ip := net.ParseIP(host) + ip := net.ParseIP(req.URL.Hostname()) if ip == nil { return delegate(req) } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go new file mode 100644 index 00000000..ab66d045 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/errors.go @@ -0,0 +1,49 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
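The `NewProxierWithNoProxyCIDR` change above relies on `url.URL.Hostname()` (Go 1.8+), which already strips the port and any IPv6 brackets, so the manual `SplitHostPort` handling becomes unnecessary. A small stdlib-only sketch of that behavior:

```
// Sketch: URL.Hostname() yields a bare host that net.ParseIP can consume directly.
package main

import (
	"fmt"
	"net"
	"net/url"
)

func main() {
	for _, raw := range []string{"http://10.0.0.1:8443/api", "http://[::1]:8080/", "http://example.com/"} {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-28s host=%-12s ip=%v\n", raw, u.Hostname(), net.ParseIP(u.Hostname()))
	}
}
```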
+*/ + +package strategicpatch + +import ( + "fmt" +) + +type LookupPatchMetaError struct { + Path string + Err error +} + +func (e LookupPatchMetaError) Error() string { + return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err) +} + +type FieldNotFoundError struct { + Path string + Field string +} + +func (e FieldNotFoundError) Error() string { + return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path) +} + +type InvalidTypeError struct { + Path string + Expected string + Actual string +} + +func (e InvalidTypeError) Error() string { + return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go new file mode 100644 index 00000000..c31de15e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/meta.go @@ -0,0 +1,194 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package strategicpatch + +import ( + "errors" + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/util/mergepatch" + forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +type PatchMeta struct { + patchStrategies []string + patchMergeKey string +} + +func (pm PatchMeta) GetPatchStrategies() []string { + if pm.patchStrategies == nil { + return []string{} + } + return pm.patchStrategies +} + +func (pm PatchMeta) SetPatchStrategies(ps []string) { + pm.patchStrategies = ps +} + +func (pm PatchMeta) GetPatchMergeKey() string { + return pm.patchMergeKey +} + +func (pm PatchMeta) SetPatchMergeKey(pmk string) { + pm.patchMergeKey = pmk +} + +type LookupPatchMeta interface { + // LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map. + LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) + // LookupPatchMetadataForSlice get subschema and the patch metadata for slice. 
+ LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) + // Get the type name of the field + Name() string +} + +type PatchMetaFromStruct struct { + T reflect.Type +} + +func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) { + t, err := getTagStructType(dataStruct) + return PatchMetaFromStruct{T: t}, err +} + +var _ LookupPatchMeta = PatchMetaFromStruct{} + +func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key) + if err != nil { + return nil, PatchMeta{}, err + } + + return PatchMetaFromStruct{T: fieldType}, + PatchMeta{ + patchStrategies: fieldPatchStrategies, + patchMergeKey: fieldPatchMergeKey, + }, nil +} + +func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key) + if err != nil { + return nil, PatchMeta{}, err + } + elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct) + t := elemPatchMetaFromStruct.T + + var elemType reflect.Type + switch t.Kind() { + // If t is an array or a slice, get the element type. + // If element is still an array or a slice, return an error. + // Otherwise, return element type. + case reflect.Array, reflect.Slice: + elemType = t.Elem() + if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice { + return nil, PatchMeta{}, errors.New("unexpected slice of slice") + } + // If t is an pointer, get the underlying element. + // If the underlying element is neither an array nor a slice, the pointer is pointing to a slice, + // e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822 + // If the underlying element is either an array or a slice, return its element type. 
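`PatchMetaFromStruct` ultimately reads the `patchStrategy` and `patchMergeKey` struct tags from the Go type. A minimal reflection sketch of that lookup follows; the `PodSpec`-like shape is a hypothetical stand-in, not the real API type.

```
// Sketch: where struct-based patch metadata comes from (hypothetical types).
package main

import (
	"fmt"
	"reflect"
)

type Container struct {
	Name string `json:"name"`
}

type PodSpec struct {
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
	f, _ := reflect.TypeOf(PodSpec{}).FieldByName("Containers")
	fmt.Println("strategy:", f.Tag.Get("patchStrategy")) // merge
	fmt.Println("mergeKey:", f.Tag.Get("patchMergeKey")) // name
}
```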
+ case reflect.Ptr: + t = t.Elem() + if t.Kind() == reflect.Array || t.Kind() == reflect.Slice { + t = t.Elem() + } + elemType = t + default: + return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String()) + } + + return PatchMetaFromStruct{T: elemType}, patchMeta, nil +} + +func (s PatchMetaFromStruct) Name() string { + return s.T.Kind().String() +} + +func getTagStructType(dataStruct interface{}) (reflect.Type, error) { + if dataStruct == nil { + return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) + } + + t := reflect.TypeOf(dataStruct) + // Get the underlying type for pointers + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + if t.Kind() != reflect.Struct { + return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) + } + + return t, nil +} + +func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type { + t, err := getTagStructType(dataStruct) + if err != nil { + panic(err) + } + return t +} + +type PatchMetaFromOpenAPI struct { + Schema openapi.Schema +} + +func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI { + return PatchMetaFromOpenAPI{Schema: s} +} + +var _ LookupPatchMeta = PatchMetaFromOpenAPI{} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + kindItem := NewKindItem(key, s.Schema.GetPath()) + s.Schema.Accept(kindItem) + + err := kindItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: kindItem.subschema}, + kindItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) { + if s.Schema == nil { + return nil, PatchMeta{}, nil + } + sliceItem := NewSliceItem(key, s.Schema.GetPath()) + s.Schema.Accept(sliceItem) + + err := sliceItem.Error() + if err != nil { + return nil, PatchMeta{}, err + } + return PatchMetaFromOpenAPI{Schema: sliceItem.subschema}, + sliceItem.patchmeta, nil +} + +func (s PatchMetaFromOpenAPI) Name() string { + schema := s.Schema + return schema.GetName() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go index 01014f9d..fd08759f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/patch.go @@ -22,9 +22,9 @@ import ( "sort" "strings" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/util/json" "k8s.io/apimachinery/pkg/util/mergepatch" - forkedjson "k8s.io/apimachinery/third_party/forked/golang/json" ) // An alternate implementation of JSON Merge Patch @@ -92,6 +92,16 @@ type MergeOptions struct { // return a patch that yields the modified document when applied to the original document, or an error // if either of the two documents is invalid. func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return CreateTwoWayMergePatchUsingLookupPatchMeta(original, modified, schema, fns...) 
+} + +func CreateTwoWayMergePatchUsingLookupPatchMeta( + original, modified []byte, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) ([]byte, error) { originalMap := map[string]interface{}{} if len(original) > 0 { if err := json.Unmarshal(original, &originalMap); err != nil { @@ -106,7 +116,7 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f } } - patchMap, err := CreateTwoWayMergeMapPatch(originalMap, modifiedMap, dataStruct, fns...) + patchMap, err := CreateTwoWayMergeMapPatchUsingLookupPatchMeta(originalMap, modifiedMap, schema, fns...) if err != nil { return nil, err } @@ -118,15 +128,19 @@ func CreateTwoWayMergePatch(original, modified []byte, dataStruct interface{}, f // encoded JSONMap. // The serialized version of the map can then be passed to StrategicMergeMapPatch. func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{}, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { - t, err := getTagStructType(dataStruct) + schema, err := NewPatchMetaFromStruct(dataStruct) if err != nil { return nil, err } + return CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified, schema, fns...) +} + +func CreateTwoWayMergeMapPatchUsingLookupPatchMeta(original, modified JSONMap, schema LookupPatchMeta, fns ...mergepatch.PreconditionFunc) (JSONMap, error) { diffOptions := DiffOptions{ SetElementOrder: true, } - patchMap, err := diffMaps(original, modified, t, diffOptions) + patchMap, err := diffMaps(original, modified, schema, diffOptions) if err != nil { return nil, err } @@ -151,12 +165,9 @@ func CreateTwoWayMergeMapPatch(original, modified JSONMap, dataStruct interface{ // - IFF list of primitives && merge strategy - use parallel deletion list // - IFF list of maps or primitives with replace strategy (default) - set patch value to the value in modified // - Build $retainKeys directive for fields with retainKeys patch strategy -func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOptions DiffOptions) (map[string]interface{}, error) { +func diffMaps(original, modified map[string]interface{}, schema LookupPatchMeta, diffOptions DiffOptions) (map[string]interface{}, error) { patch := map[string]interface{}{} - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } + // This will be used to build the $retainKeys directive sent in the patch retainKeysList := make([]interface{}, 0, len(modified)) @@ -198,10 +209,10 @@ func diffMaps(original, modified map[string]interface{}, t reflect.Type, diffOpt switch originalValueTyped := originalValue.(type) { case map[string]interface{}: modifiedValueTyped := modifiedValue.(map[string]interface{}) - err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions) + err = handleMapDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) case []interface{}: modifiedValueTyped := modifiedValue.([]interface{}) - err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, t, diffOptions) + err = handleSliceDiff(key, originalValueTyped, modifiedValueTyped, patch, schema, diffOptions) default: replacePatchFieldIfNotEqual(key, originalValue, modifiedValue, patch, diffOptions) } @@ -248,8 +259,9 @@ func handleDirectiveMarker(key string, originalValue, modifiedValue interface{}, // patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue // diffOptions contains multiple options to control how we do the diff. 
func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]interface{}, - t reflect.Type, diffOptions DiffOptions) error { - fieldType, fieldPatchStrategies, _, err := forkedjson.LookupPatchMetadata(t, key) + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(key) + if err != nil { // We couldn't look up metadata for the field // If the values are identical, this doesn't matter, no patch is needed @@ -259,7 +271,7 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in // Otherwise, return the error return err } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } @@ -271,7 +283,7 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in patch[key] = modifiedValue } default: - patchValue, err := diffMaps(originalValue, modifiedValue, fieldType, diffOptions) + patchValue, err := diffMaps(originalValue, modifiedValue, subschema, diffOptions) if err != nil { return err } @@ -290,8 +302,8 @@ func handleMapDiff(key string, originalValue, modifiedValue, patch map[string]in // patch is the patch map that contains key and the updated value, and it is the parent of originalValue, modifiedValue // diffOptions contains multiple options to control how we do the diff. func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, patch map[string]interface{}, - t reflect.Type, diffOptions DiffOptions) error { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, key) + schema LookupPatchMeta, diffOptions DiffOptions) error { + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(key) if err != nil { // We couldn't look up metadata for the field // If the values are identical, this doesn't matter, no patch is needed @@ -301,7 +313,7 @@ func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, pat // Otherwise, return the error return err } - retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) + retainKeys, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } @@ -309,7 +321,7 @@ func handleSliceDiff(key string, originalValue, modifiedValue []interface{}, pat // Merge the 2 slices using mergePatchKey case mergeDirective: diffOptions.BuildRetainKeysDirective = retainKeys - addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, fieldType.Elem(), fieldPatchMergeKey, diffOptions) + addList, deletionList, setOrderList, err := diffLists(originalValue, modifiedValue, subschema, patchMeta.GetPatchMergeKey(), diffOptions) if err != nil { return err } @@ -536,7 +548,7 @@ func normalizeSliceOrder(toSort, order []interface{}, mergeKey string, kind refl // another list to set the order of the list // Only list of primitives with merge strategy will generate a parallel deletion list. // These two lists should yield modified when applied to original, for lists with merge semantics. 
-func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { +func diffLists(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, []interface{}, error) { if len(original) == 0 { // Both slices are empty - do nothing if len(modified) == 0 || diffOptions.IgnoreChangesAndAdditions { @@ -556,7 +568,7 @@ func diffLists(original, modified []interface{}, t reflect.Type, mergeKey string kind := elementType.Kind() switch kind { case reflect.Map: - patchList, deleteList, err = diffListsOfMaps(original, modified, t, mergeKey, diffOptions) + patchList, deleteList, err = diffListsOfMaps(original, modified, schema, mergeKey, diffOptions) if err != nil { return nil, nil, nil, err } @@ -702,15 +714,15 @@ func compareListValuesAtIndex(list1Inbounds, list2Inbounds bool, list1Value, lis // diffListsOfMaps takes a pair of lists and // returns a (recursive) strategic merge patch list contains additions and changes and // a deletion list contains deletions -func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { +func diffListsOfMaps(original, modified []interface{}, schema LookupPatchMeta, mergeKey string, diffOptions DiffOptions) ([]interface{}, []interface{}, error) { patch := make([]interface{}, 0, len(modified)) deletionList := make([]interface{}, 0, len(original)) - originalSorted, err := sortMergeListsByNameArray(original, t, mergeKey, false) + originalSorted, err := sortMergeListsByNameArray(original, schema, mergeKey, false) if err != nil { return nil, nil, err } - modifiedSorted, err := sortMergeListsByNameArray(modified, t, mergeKey, false) + modifiedSorted, err := sortMergeListsByNameArray(modified, schema, mergeKey, false) if err != nil { return nil, nil, err } @@ -745,7 +757,7 @@ func diffListsOfMaps(original, modified []interface{}, t reflect.Type, mergeKey switch { case bothInBounds && ItemMatchesOriginalAndModifiedSlice(originalElementMergeKeyValueString, modifiedElementMergeKeyValueString): // Merge key values are equal, so recurse - patchValue, err := diffMaps(originalElement, modifiedElement, t, diffOptions) + patchValue, err := diffMaps(originalElement, modifiedElement, schema, diffOptions) if err != nil { return nil, nil, err } @@ -798,6 +810,15 @@ func getMapAndMergeKeyValueByIndex(index int, mergeKey string, listOfMaps []inte // must be json encoded content. A patch can be created from an original and a modified document // by calling CreateStrategicMergePatch. 
func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte, error) { + schema, err := NewPatchMetaFromStruct(dataStruct) + if err != nil { + return nil, err + } + + return StrategicMergePatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergePatchUsingLookupPatchMeta(original, patch []byte, schema LookupPatchMeta) ([]byte, error) { originalMap, err := handleUnmarshal(original) if err != nil { return nil, err @@ -807,7 +828,7 @@ func StrategicMergePatch(original, patch []byte, dataStruct interface{}) ([]byte return nil, err } - result, err := StrategicMergeMapPatch(originalMap, patchMap, dataStruct) + result, err := StrategicMergeMapPatchUsingLookupPatchMeta(originalMap, patchMap, schema) if err != nil { return nil, err } @@ -828,38 +849,35 @@ func handleUnmarshal(j []byte) (map[string]interface{}, error) { return m, nil } -// StrategicMergePatch applies a strategic merge patch. The original and patch documents +// StrategicMergeMapPatch applies a strategic merge patch. The original and patch documents // must be JSONMap. A patch can be created from an original and modified document by // calling CreateTwoWayMergeMapPatch. // Warning: the original and patch JSONMap objects are mutated by this function and should not be reused. func StrategicMergeMapPatch(original, patch JSONMap, dataStruct interface{}) (JSONMap, error) { - t, err := getTagStructType(dataStruct) + schema, err := NewPatchMetaFromStruct(dataStruct) if err != nil { return nil, err } + + // We need the go struct tags `patchMergeKey` and `patchStrategy` for fields that support a strategic merge patch. + // For native resources, we can easily figure out these tags since we know the fields. + + // Because custom resources are decoded as Unstructured and because we're missing the metadata about how to handle + // each field in a strategic merge patch, we can't find the go struct tags. Hence, we can't easily do a strategic merge + // for custom resources. So we should fail fast and return an error. + if _, ok := dataStruct.(*unstructured.Unstructured); ok { + return nil, mergepatch.ErrUnsupportedStrategicMergePatchFormat + } + + return StrategicMergeMapPatchUsingLookupPatchMeta(original, patch, schema) +} + +func StrategicMergeMapPatchUsingLookupPatchMeta(original, patch JSONMap, schema LookupPatchMeta) (JSONMap, error) { mergeOptions := MergeOptions{ MergeParallelList: true, IgnoreUnmatchedNulls: true, } - return mergeMap(original, patch, t, mergeOptions) -} - -func getTagStructType(dataStruct interface{}) (reflect.Type, error) { - if dataStruct == nil { - return nil, mergepatch.ErrBadArgKind(struct{}{}, nil) - } - - t := reflect.TypeOf(dataStruct) - // Get the underlying type for pointers - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - - if t.Kind() != reflect.Struct { - return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct) - } - - return t, nil + return mergeMap(original, patch, schema, mergeOptions) } // handleDirectiveInMergeMap handles the patch directive when merging 2 maps. @@ -1067,7 +1085,7 @@ func applyRetainKeysDirective(original, patch map[string]interface{}, options Me // The precedence is $setElementOrder > order in patch list > order in live list. // This function will delete the item after merging it to prevent process it again in the future. 
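A hedged end-to-end sketch of the two public entry points refactored above: `CreateTwoWayMergePatch` computes a patch from original and modified JSON, and `StrategicMergePatch` applies it back. The `Spec`/`Container` types are hypothetical stand-ins carrying the tags the metadata lookup needs, and the example assumes a build vendoring this revision.

```
// Usage sketch for the strategicpatch entry points touched above.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

type Container struct {
	Name  string `json:"name"`
	Image string `json:"image"`
}

type Spec struct {
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

func main() {
	original := []byte(`{"containers":[{"name":"app","image":"app:v1"}]}`)
	modified := []byte(`{"containers":[{"name":"app","image":"app:v2"}]}`)

	// Build a strategic merge patch from the two documents.
	patch, err := strategicpatch.CreateTwoWayMergePatch(original, modified, Spec{})
	if err != nil {
		panic(err)
	}
	fmt.Println("patch:", string(patch))

	// Apply the patch back onto the original document.
	merged, err := strategicpatch.StrategicMergePatch(original, patch, Spec{})
	if err != nil {
		panic(err)
	}
	fmt.Println("merged:", string(merged))
}
```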
// Ref: https://git.k8s.io/community/contributors/design-proposals/cli/preserve-order-in-strategic-merge-patch.md -func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) error { +func mergePatchIntoOriginal(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) error { for key, patchV := range patch { // Do nothing if there is no ordering directive if !strings.HasPrefix(key, setElementOrderDirectivePrefix) { @@ -1094,9 +1112,9 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty var ( ok bool originalFieldValue, patchFieldValue, merged []interface{} - patchStrategy, mergeKey string - patchStrategies []string - fieldType reflect.Type + patchStrategy string + patchMeta PatchMeta + subschema LookupPatchMeta ) typedSetElementOrderList, ok := setElementOrderInPatch.([]interface{}) if !ok { @@ -1122,16 +1140,16 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty return mergepatch.ErrBadArgType(patchFieldValue, patchList) } } - fieldType, patchStrategies, mergeKey, err = forkedjson.LookupPatchMetadata(t, originalKey) + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(originalKey) if err != nil { return err } - _, patchStrategy, err = extractRetainKeysPatchStrategy(patchStrategies) + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return err } // Check for consistency between the element order list and the field it applies to - err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, mergeKey) + err = validatePatchWithSetOrderList(patchFieldValue, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) if err != nil { return err } @@ -1144,8 +1162,8 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // list was added merged = patchFieldValue case foundOriginal && foundPatch: - merged, err = mergeSliceHandler(originalList, patchList, fieldType, - patchStrategy, mergeKey, false, mergeOptions) + merged, err = mergeSliceHandler(originalList, patchList, subschema, + patchStrategy, patchMeta.GetPatchMergeKey(), false, mergeOptions) if err != nil { return err } @@ -1155,13 +1173,13 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // Split all items into patch items and server-only items and then enforce the order. var patchItems, serverOnlyItems []interface{} - if len(mergeKey) == 0 { + if len(patchMeta.GetPatchMergeKey()) == 0 { // Primitives doesn't need merge key to do partitioning. patchItems, serverOnlyItems = partitionPrimitivesByPresentInList(merged, typedSetElementOrderList) } else { // Maps need merge key to do partitioning. 
- patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, mergeKey) + patchItems, serverOnlyItems, err = partitionMapsByPresentInList(merged, typedSetElementOrderList, patchMeta.GetPatchMergeKey()) if err != nil { return err } @@ -1175,7 +1193,7 @@ func mergePatchIntoOriginal(original, patch map[string]interface{}, t reflect.Ty // normalize merged list // typedSetElementOrderList contains all the relative order in typedPatchList, // so don't need to use typedPatchList - both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, mergeKey, kind) + both, err := normalizeElementOrder(patchItems, serverOnlyItems, typedSetElementOrderList, originalFieldValue, patchMeta.GetPatchMergeKey(), kind) if err != nil { return err } @@ -1237,7 +1255,7 @@ func partitionMapsByPresentInList(original, partitionBy []interface{}, mergeKey // If patch contains any null field (e.g. field_1: null) that is not // present in original, then to propagate it to the end result use // mergeOptions.IgnoreUnmatchedNulls == false. -func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptions MergeOptions) (map[string]interface{}, error) { +func mergeMap(original, patch map[string]interface{}, schema LookupPatchMeta, mergeOptions MergeOptions) (map[string]interface{}, error) { if v, ok := patch[directiveMarker]; ok { return handleDirectiveInMergeMap(v, patch) } @@ -1257,7 +1275,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio // When not merging the directive, it will make sure $setElementOrder list exist only in original. // When merging the directive, it will process $setElementOrder and its patch list together. // This function will delete the merged elements from patch so they will not be reprocessed - err = mergePatchIntoOriginal(original, patch, t, mergeOptions) + err = mergePatchIntoOriginal(original, patch, schema, mergeOptions) if err != nil { return nil, err } @@ -1295,11 +1313,6 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio continue } - // If the data type is a pointer, resolve the element. - if t.Kind() == reflect.Ptr { - t = t.Elem() - } - originalType := reflect.TypeOf(original[k]) patchType := reflect.TypeOf(patchV) if originalType != patchType { @@ -1307,22 +1320,27 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio continue } // If they're both maps or lists, recurse into the value. - // First find the fieldPatchStrategy and fieldPatchMergeKey. 
- fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return nil, err - } - switch originalType.Kind() { case reflect.Map: - - original[k], err = mergeMapHandler(original[k], patchV, fieldType, patchStrategy, mergeOptions) + subschema, patchMeta, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + original[k], err = mergeMapHandler(original[k], patchV, subschema, patchStrategy, mergeOptions) case reflect.Slice: - original[k], err = mergeSliceHandler(original[k], patchV, fieldType, patchStrategy, fieldPatchMergeKey, isDeleteList, mergeOptions) + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) + if err != nil { + return nil, err + } + original[k], err = mergeSliceHandler(original[k], patchV, subschema, patchStrategy, patchMeta.GetPatchMergeKey(), isDeleteList, mergeOptions) default: original[k] = patchV } @@ -1335,7 +1353,7 @@ func mergeMap(original, patch map[string]interface{}, t reflect.Type, mergeOptio // mergeMapHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy and mergeOptions. -func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, +func mergeMapHandler(original, patch interface{}, schema LookupPatchMeta, fieldPatchStrategy string, mergeOptions MergeOptions) (map[string]interface{}, error) { typedOriginal, typedPatch, err := mapTypeAssertion(original, patch) if err != nil { @@ -1343,7 +1361,7 @@ func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, } if fieldPatchStrategy != replaceDirective { - return mergeMap(typedOriginal, typedPatch, fieldType, mergeOptions) + return mergeMap(typedOriginal, typedPatch, schema, mergeOptions) } else { return typedPatch, nil } @@ -1351,7 +1369,7 @@ func mergeMapHandler(original, patch interface{}, fieldType reflect.Type, // mergeSliceHandler handles how to merge `patchV` whose key is `key` with `original` respecting // fieldPatchStrategy, fieldPatchMergeKey, isDeleteList and mergeOptions. -func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, +func mergeSliceHandler(original, patch interface{}, schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, isDeleteList bool, mergeOptions MergeOptions) ([]interface{}, error) { typedOriginal, typedPatch, err := sliceTypeAssertion(original, patch) if err != nil { @@ -1359,8 +1377,7 @@ func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, } if fieldPatchStrategy == mergeDirective { - elemType := fieldType.Elem() - return mergeSlice(typedOriginal, typedPatch, elemType, fieldPatchMergeKey, mergeOptions, isDeleteList) + return mergeSlice(typedOriginal, typedPatch, schema, fieldPatchMergeKey, mergeOptions, isDeleteList) } else { return typedPatch, nil } @@ -1369,7 +1386,7 @@ func mergeSliceHandler(original, patch interface{}, fieldType reflect.Type, // Merge two slices together. Note: This may modify both the original slice and // the patch because getting a deep copy of a slice in golang is highly // non-trivial. 
-func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { +func mergeSlice(original, patch []interface{}, schema LookupPatchMeta, mergeKey string, mergeOptions MergeOptions, isDeleteList bool) ([]interface{}, error) { if len(original) == 0 && len(patch) == 0 { return original, nil } @@ -1394,7 +1411,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s } else { if mergeKey == "" { - return nil, fmt.Errorf("cannot merge lists without merge key for type %s", elemType.Kind().String()) + return nil, fmt.Errorf("cannot merge lists without merge key for %s", schema.Name()) } original, patch, err = mergeSliceWithSpecialElements(original, patch, mergeKey) @@ -1402,7 +1419,7 @@ func mergeSlice(original, patch []interface{}, elemType reflect.Type, mergeKey s return nil, err } - merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, elemType, mergeOptions) + merged, err = mergeSliceWithoutSpecialElements(original, patch, mergeKey, schema, mergeOptions) if err != nil { return nil, err } @@ -1480,7 +1497,7 @@ func deleteMatchingEntries(original []interface{}, mergeKey string, mergeValue i // mergeSliceWithoutSpecialElements merges slices with non-special elements. // original and patch must be slices of maps, they should be checked before calling this function. -func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, elemType reflect.Type, mergeOptions MergeOptions) ([]interface{}, error) { +func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey string, schema LookupPatchMeta, mergeOptions MergeOptions) ([]interface{}, error) { for _, v := range patch { typedV := v.(map[string]interface{}) mergeValue, ok := typedV[mergeKey] @@ -1499,7 +1516,7 @@ func mergeSliceWithoutSpecialElements(original, patch []interface{}, mergeKey st var mergedMaps interface{} var err error // Merge into original. - mergedMaps, err = mergeMap(originalMap, typedV, elemType, mergeOptions) + mergedMaps, err = mergeMap(originalMap, typedV, schema, mergeOptions) if err != nil { return nil, err } @@ -1532,7 +1549,7 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{ for k, v := range m { typedV, ok := v.(map[string]interface{}) if !ok { - return nil, 0, false, fmt.Errorf("value for key %v is not a map.", k) + return nil, 0, false, fmt.Errorf("value for key %v is not a map", k) } valueToMatch, ok := typedV[key] @@ -1548,14 +1565,14 @@ func findMapInSliceBasedOnKeyValue(m []interface{}, key string, value interface{ // by key. This is needed by tests because in JSON, list order is significant, // but in Strategic Merge Patch, merge lists do not have significant order. // Sorting the lists allows for order-insensitive comparison of patched maps. -func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error) { +func sortMergeListsByName(mapJSON []byte, schema LookupPatchMeta) ([]byte, error) { var m map[string]interface{} err := json.Unmarshal(mapJSON, &m) if err != nil { - return nil, err + return nil, mergepatch.ErrBadJSONDoc } - newM, err := sortMergeListsByNameMap(m, reflect.TypeOf(dataStruct)) + newM, err := sortMergeListsByNameMap(m, schema) if err != nil { return nil, err } @@ -1564,7 +1581,7 @@ func sortMergeListsByName(mapJSON []byte, dataStruct interface{}) ([]byte, error } // Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in a map. 
-func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[string]interface{}, error) { +func sortMergeListsByNameMap(s map[string]interface{}, schema LookupPatchMeta) (map[string]interface{}, error) { newS := map[string]interface{}{} for k, v := range s { if k == retainKeysDirective { @@ -1585,26 +1602,29 @@ func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[stri return nil, mergepatch.ErrBadPatchFormatForSetElementOrderList } } else if k != directiveMarker { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(t, k) - if err != nil { - return nil, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return nil, err - } - - // If v is a map or a merge slice, recurse. - if typedV, ok := v.(map[string]interface{}); ok { - var err error - v, err = sortMergeListsByNameMap(typedV, fieldType) + // recurse for map and slice. + switch typedV := v.(type) { + case map[string]interface{}: + subschema, _, err := schema.LookupPatchMetadataForStruct(k) + if err != nil { + return nil, err + } + v, err = sortMergeListsByNameMap(typedV, subschema) + if err != nil { + return nil, err + } + case []interface{}: + subschema, patchMeta, err := schema.LookupPatchMetadataForSlice(k) + if err != nil { + return nil, err + } + _, patchStrategy, err := extractRetainKeysPatchStrategy(patchMeta.GetPatchStrategies()) if err != nil { return nil, err } - } else if typedV, ok := v.([]interface{}); ok { if patchStrategy == mergeDirective { var err error - v, err = sortMergeListsByNameArray(typedV, fieldType.Elem(), fieldPatchMergeKey, true) + v, err = sortMergeListsByNameArray(typedV, subschema, patchMeta.GetPatchMergeKey(), true) if err != nil { return nil, err } @@ -1619,7 +1639,7 @@ func sortMergeListsByNameMap(s map[string]interface{}, t reflect.Type) (map[stri } // Function sortMergeListsByNameMap recursively sorts the merge lists by its mergeKey in an array. -func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey string, recurse bool) ([]interface{}, error) { +func sortMergeListsByNameArray(s []interface{}, schema LookupPatchMeta, mergeKey string, recurse bool) ([]interface{}, error) { if len(s) == 0 { return s, nil } @@ -1642,7 +1662,7 @@ func sortMergeListsByNameArray(s []interface{}, elemType reflect.Type, mergeKey for _, elem := range s { if recurse { typedElem := elem.(map[string]interface{}) - newElem, err := sortMergeListsByNameMap(typedElem, elemType) + newElem, err := sortMergeListsByNameMap(typedElem, schema) if err != nil { return nil, err } @@ -1788,18 +1808,13 @@ func sliceElementType(slices ...[]interface{}) (reflect.Type, error) { // objects overlap with different values in any key. All keys are required to be // strings. Since patches of the same Type have congruent keys, this is valid // for multiple patch types. This method supports strategic merge patch semantics. 
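The MergingMapsHaveConflicts documentation above describes detecting two patches that overlap with different values under the same schema. A small sketch of what such a conflict looks like, again assuming the NewPatchMetaFromStruct helper and the vendored core/v1 Pod type:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	// Assumed helper from the same package; see the earlier sketch.
	schema, err := strategicpatch.NewPatchMetaFromStruct(&corev1.Pod{})
	if err != nil {
		panic(err)
	}

	// Two patches that set spec.nodeName to different values.
	left := map[string]interface{}{"spec": map[string]interface{}{"nodeName": "node-a"}}
	right := map[string]interface{}{"spec": map[string]interface{}{"nodeName": "node-b"}}

	conflict, err := strategicpatch.MergingMapsHaveConflicts(left, right, schema)
	if err != nil {
		panic(err)
	}
	fmt.Println(conflict) // true: the maps overlap with different values
}
```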
-func MergingMapsHaveConflicts(left, right map[string]interface{}, dataStruct interface{}) (bool, error) { - t, err := getTagStructType(dataStruct) - if err != nil { - return true, err - } - - return mergingMapFieldsHaveConflicts(left, right, t, "", "") +func MergingMapsHaveConflicts(left, right map[string]interface{}, schema LookupPatchMeta) (bool, error) { + return mergingMapFieldsHaveConflicts(left, right, schema, "", "") } func mergingMapFieldsHaveConflicts( left, right interface{}, - fieldType reflect.Type, + schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, ) (bool, error) { switch leftType := left.(type) { @@ -1830,15 +1845,14 @@ func mergingMapFieldsHaveConflicts( return false, nil } // Check the individual keys. - return mapsHaveConflicts(leftType, rightType, fieldType) + return mapsHaveConflicts(leftType, rightType, schema) case []interface{}: rightType, ok := right.([]interface{}) if !ok { return true, nil } - return slicesHaveConflicts(leftType, rightType, fieldType, fieldPatchStrategy, fieldPatchMergeKey) - + return slicesHaveConflicts(leftType, rightType, schema, fieldPatchStrategy, fieldPatchMergeKey) case string, float64, bool, int, int64, nil: return !reflect.DeepEqual(left, right), nil default: @@ -1846,21 +1860,37 @@ func mergingMapFieldsHaveConflicts( } } -func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { +func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { for key, leftValue := range typedLeft { if key != directiveMarker && key != retainKeysDirective { if rightValue, ok := typedRight[key]; ok { - fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadata(structType, key) - if err != nil { - return true, err - } - _, patchStrategy, err := extractRetainKeysPatchStrategy(fieldPatchStrategies) - if err != nil { - return true, err + var subschema LookupPatchMeta + var patchMeta PatchMeta + var patchStrategy string + var err error + switch leftValue.(type) { + case []interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForSlice(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } + case map[string]interface{}: + subschema, patchMeta, err = schema.LookupPatchMetadataForStruct(key) + if err != nil { + return true, err + } + _, patchStrategy, err = extractRetainKeysPatchStrategy(patchMeta.patchStrategies) + if err != nil { + return true, err + } } if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, - fieldType, patchStrategy, fieldPatchMergeKey); hasConflicts { + subschema, patchStrategy, patchMeta.GetPatchMergeKey()); hasConflicts { return true, err } } @@ -1872,7 +1902,7 @@ func mapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType func slicesHaveConflicts( typedLeft, typedRight []interface{}, - fieldType reflect.Type, + schema LookupPatchMeta, fieldPatchStrategy, fieldPatchMergeKey string, ) (bool, error) { elementType, err := sliceElementType(typedLeft, typedRight) @@ -1880,7 +1910,6 @@ func slicesHaveConflicts( return true, err } - valueType := fieldType.Elem() if fieldPatchStrategy == mergeDirective { // Merging lists of scalars have no conflicts by definition // So we only need to check further if the elements are maps @@ -1899,7 +1928,7 @@ func slicesHaveConflicts( return true, err } - return mapsOfMapsHaveConflicts(leftMap, 
rightMap, valueType) + return mapsOfMapsHaveConflicts(leftMap, rightMap, schema) } // Either we don't have type information, or these are non-merging lists @@ -1917,7 +1946,7 @@ func slicesHaveConflicts( // Compare the slices element by element in order // This test will fail if the slices are not sorted for i := range typedLeft { - if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], valueType, "", ""); hasConflicts { + if hasConflicts, err := mergingMapFieldsHaveConflicts(typedLeft[i], typedRight[i], schema, "", ""); hasConflicts { return true, err } } @@ -1944,10 +1973,10 @@ func sliceOfMapsToMapOfMaps(slice []interface{}, mergeKey string) (map[string]in return result, nil } -func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, structType reflect.Type) (bool, error) { +func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, schema LookupPatchMeta) (bool, error) { for key, leftValue := range typedLeft { if rightValue, ok := typedRight[key]; ok { - if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, structType, "", ""); hasConflicts { + if hasConflicts, err := mergingMapFieldsHaveConflicts(leftValue, rightValue, schema, "", ""); hasConflicts { return true, err } } @@ -1967,7 +1996,7 @@ func mapsOfMapsHaveConflicts(typedLeft, typedRight map[string]interface{}, struc // in a way that is different from how it is changed in current (e.g., deleting it, changing its // value). We also propagate values fields that do not exist in original but are explicitly // defined in modified. -func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct interface{}, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { +func CreateThreeWayMergePatch(original, modified, current []byte, schema LookupPatchMeta, overwrite bool, fns ...mergepatch.PreconditionFunc) ([]byte, error) { originalMap := map[string]interface{}{} if len(original) > 0 { if err := json.Unmarshal(original, &originalMap); err != nil { @@ -1989,11 +2018,6 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int } } - t, err := getTagStructType(dataStruct) - if err != nil { - return nil, err - } - // The patch is the difference from current to modified without deletions, plus deletions // from original to modified. 
To find it, we compute deletions, which are the deletions from // original to modified, and delta, which is the difference from current to modified without @@ -2002,7 +2026,7 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int IgnoreDeletions: true, SetElementOrder: true, } - deltaMap, err := diffMaps(currentMap, modifiedMap, t, deltaMapDiffOptions) + deltaMap, err := diffMaps(currentMap, modifiedMap, schema, deltaMapDiffOptions) if err != nil { return nil, err } @@ -2010,13 +2034,13 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int SetElementOrder: true, IgnoreChangesAndAdditions: true, } - deletionsMap, err := diffMaps(originalMap, modifiedMap, t, deletionsMapDiffOptions) + deletionsMap, err := diffMaps(originalMap, modifiedMap, schema, deletionsMapDiffOptions) if err != nil { return nil, err } mergeOptions := MergeOptions{} - patchMap, err := mergeMap(deletionsMap, deltaMap, t, mergeOptions) + patchMap, err := mergeMap(deletionsMap, deltaMap, schema, mergeOptions) if err != nil { return nil, err } @@ -2032,12 +2056,12 @@ func CreateThreeWayMergePatch(original, modified, current []byte, dataStruct int // then return a conflict error. if !overwrite { changeMapDiffOptions := DiffOptions{} - changedMap, err := diffMaps(originalMap, currentMap, t, changeMapDiffOptions) + changedMap, err := diffMaps(originalMap, currentMap, schema, changeMapDiffOptions) if err != nil { return nil, err } - hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, dataStruct) + hasConflicts, err := MergingMapsHaveConflicts(patchMap, changedMap, schema) if err != nil { return nil, err } diff --git a/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go new file mode 100644 index 00000000..f84d65aa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/strategicpatch/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package strategicpatch + +import ( + "errors" + "strings" + + "k8s.io/apimachinery/pkg/util/mergepatch" + openapi "k8s.io/kube-openapi/pkg/util/proto" +) + +const ( + patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy" + patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key" +) + +type LookupPatchItem interface { + openapi.SchemaVisitor + + Error() error + Path() *openapi.Path +} + +type kindItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewKindItem(key string, path *openapi.Path) *kindItem { + return &kindItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &kindItem{} + +func (item *kindItem) Error() error { + return item.err +} + +func (item *kindItem) Path() *openapi.Path { + return item.path +} + +func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected kind, but got primitive") +} + +func (item *kindItem) VisitArray(schema *openapi.Array) { + item.err = errors.New("expected kind, but got slice") +} + +func (item *kindItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected kind, but got map") +} + +func (item *kindItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } +} + +func (item *kindItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.subschema = subschema +} + +type sliceItem struct { + key string + path *openapi.Path + err error + patchmeta PatchMeta + subschema openapi.Schema + hasVisitKind bool +} + +func NewSliceItem(key string, path *openapi.Path) *sliceItem { + return &sliceItem{ + key: key, + path: path, + } +} + +var _ LookupPatchItem = &sliceItem{} + +func (item *sliceItem) Error() error { + return item.err +} + +func (item *sliceItem) Path() *openapi.Path { + return item.path +} + +func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) { + item.err = errors.New("expected slice, but got primitive") +} + +func (item *sliceItem) VisitArray(schema *openapi.Array) { + if !item.hasVisitKind { + item.err = errors.New("expected visit kind first, then visit array") + } + subschema := schema.SubType + item.subschema = subschema +} + +func (item *sliceItem) VisitMap(schema *openapi.Map) { + item.err = errors.New("expected slice, but got map") +} + +func (item *sliceItem) VisitReference(schema openapi.Reference) { + if !item.hasVisitKind { + schema.SubSchema().Accept(item) + } else { + item.subschema = schema.SubSchema() + } +} + +func (item *sliceItem) VisitKind(schema *openapi.Kind) { + subschema, ok := schema.Fields[item.key] + if !ok { + item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key} + return + } + + mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions()) + if err != nil { + item.err = err + return + } + item.patchmeta = PatchMeta{ + patchStrategies: patchStrategies, + patchMergeKey: mergeKey, + } + item.hasVisitKind = true + subschema.Accept(item) +} + +func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) { + ps, foundPS := 
extensions[patchStrategyOpenapiextensionKey] + var patchStrategies []string + var mergeKey, patchStrategy string + var ok bool + if foundPS { + patchStrategy, ok = ps.(string) + if ok { + patchStrategies = strings.Split(patchStrategy, ",") + } else { + return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps) + } + } + mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey] + if foundMK { + mergeKey, ok = mk.(string) + if !ok { + return "", nil, mergepatch.ErrBadArgType(mergeKey, mk) + } + } + return mergeKey, patchStrategies, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go index 322923d4..ab590e13 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package watch -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Event) DeepCopyInto(out *Event) { *out = *in diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go index 006972ec..8205a4dd 100644 --- a/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go @@ -26,17 +26,14 @@ const ( // struct field given the struct type and the JSON name of the field. // It returns field type, a slice of patch strategies, merge key and error. // TODO: fix the returned errors to be introspectable. -func LookupPatchMetadata(t reflect.Type, jsonField string) ( +func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) ( elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) { if t.Kind() == reflect.Ptr { t = t.Elem() } - if t.Kind() == reflect.Map { - elemType = t.Elem() - return - } + if t.Kind() != reflect.Struct { - e = fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s", + e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s", t.Kind().String()) return } diff --git a/vendor/k8s.io/client-go/discovery/unstructured.go b/vendor/k8s.io/client-go/discovery/unstructured.go index ee72d668..fa7f2ec0 100644 --- a/vendor/k8s.io/client-go/discovery/unstructured.go +++ b/vendor/k8s.io/client-go/discovery/unstructured.go @@ -17,22 +17,28 @@ limitations under the License. package discovery import ( - "fmt" + "reflect" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" ) -// UnstructuredObjectTyper provides a runtime.ObjectTyper implmentation for +// UnstructuredObjectTyper provides a runtime.ObjectTyper implementation for // runtime.Unstructured object based on discovery information. 
type UnstructuredObjectTyper struct { registered map[schema.GroupVersionKind]bool + typers []runtime.ObjectTyper } // NewUnstructuredObjectTyper returns a runtime.ObjectTyper for -// unstructred objects based on discovery information. -func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *UnstructuredObjectTyper { - dot := &UnstructuredObjectTyper{registered: make(map[schema.GroupVersionKind]bool)} +// unstructured objects based on discovery information. It accepts a list of fallback typers +// for handling objects that are not runtime.Unstructured. It does not delegate the Recognizes +// check, only ObjectKinds. +func NewUnstructuredObjectTyper(groupResources []*APIGroupResources, typers ...runtime.ObjectTyper) *UnstructuredObjectTyper { + dot := &UnstructuredObjectTyper{ + registered: make(map[schema.GroupVersionKind]bool), + typers: typers, + } for _, group := range groupResources { for _, discoveryVersion := range group.Group.Versions { resources, ok := group.VersionedResources[discoveryVersion.Version] @@ -49,29 +55,35 @@ func NewUnstructuredObjectTyper(groupResources []*APIGroupResources) *Unstructur return dot } -// ObjectKind returns the group,version,kind of the provided object, or an error -// if the object in not runtime.Unstructured or has no group,version,kind -// information. -func (d *UnstructuredObjectTyper) ObjectKind(obj runtime.Object) (schema.GroupVersionKind, error) { - if _, ok := obj.(runtime.Unstructured); !ok { - return schema.GroupVersionKind{}, fmt.Errorf("type %T is invalid for dynamic object typer", obj) - } - - return obj.GetObjectKind().GroupVersionKind(), nil -} - // ObjectKinds returns a slice of one element with the group,version,kind of the // provided object, or an error if the object is not runtime.Unstructured or // has no group,version,kind information. unversionedType will always be false // because runtime.Unstructured object should always have group,version,kind // information set. func (d *UnstructuredObjectTyper) ObjectKinds(obj runtime.Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return nil, false, err + if _, ok := obj.(runtime.Unstructured); ok { + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, runtime.NewMissingKindErr("object has no kind field ") + } + if len(gvk.Version) == 0 { + return nil, false, runtime.NewMissingVersionErr("object has no apiVersion field") + } + return []schema.GroupVersionKind{gvk}, false, nil } - - return []schema.GroupVersionKind{gvk}, false, nil + var lastErr error + for _, typer := range d.typers { + gvks, unversioned, err := typer.ObjectKinds(obj) + if err != nil { + lastErr = err + continue + } + return gvks, unversioned, nil + } + if lastErr == nil { + lastErr = runtime.NewNotRegisteredErrForType(reflect.TypeOf(obj)) + } + return nil, false, lastErr } // Recognizes returns true if the provided group,version,kind was in the @@ -80,16 +92,4 @@ func (d *UnstructuredObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { return d.registered[gvk] } -// IsUnversioned returns false always because runtime.Unstructured objects -// should always have group,version,kind information set. ok will be true if the -// object's group,version,kind is api.Registry. 
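The discovery hunk above rewrites UnstructuredObjectTyper so that ObjectKinds falls back to any extra typers passed to NewUnstructuredObjectTyper. A construction sketch, assuming the GetAPIGroupResources helper from the same discovery package and a kubeconfig at the default location:

```go
package main

import (
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed setup: build a clientset from the default kubeconfig location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Assumed helper: lists the API groups and resources the server exposes.
	groupResources, err := discovery.GetAPIGroupResources(client.Discovery())
	if err != nil {
		panic(err)
	}

	// Extra typers are fallbacks for objects that are not runtime.Unstructured;
	// scheme.Scheme covers the typed, registered Go objects.
	typer := discovery.NewUnstructuredObjectTyper(groupResources, scheme.Scheme)
	_ = typer
}
```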
-func (d *UnstructuredObjectTyper) IsUnversioned(obj runtime.Object) (unversioned bool, ok bool) { - gvk, err := d.ObjectKind(obj) - if err != nil { - return false, false - } - - return false, d.registered[gvk] -} - var _ runtime.ObjectTyper = &UnstructuredObjectTyper{} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go new file mode 100644 index 00000000..74bfb601 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/interface.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package admissionregistration + +import ( + v1alpha1 "k8s.io/client-go/informers/admissionregistration/v1alpha1" + v1beta1 "k8s.io/client-go/informers/admissionregistration/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 00000000..0f55c737 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + admissionregistration_v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/admissionregistration/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// InitializerConfigurationInformer provides access to a shared informer and lister for +// InitializerConfigurations. +type InitializerConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.InitializerConfigurationLister +} + +type initializerConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredInitializerConfigurationInformer constructs a new informer for InitializerConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredInitializerConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().InitializerConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1alpha1().InitializerConfigurations().Watch(options) + }, + }, + &admissionregistration_v1alpha1.InitializerConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *initializerConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredInitializerConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *initializerConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1alpha1.InitializerConfiguration{}, f.defaultInformer) +} + +func (f *initializerConfigurationInformer) Lister() v1alpha1.InitializerConfigurationLister { + return v1alpha1.NewInitializerConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go new file mode 100644 index 00000000..44da0479 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // InitializerConfigurations returns a InitializerConfigurationInformer. + InitializerConfigurations() InitializerConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// InitializerConfigurations returns a InitializerConfigurationInformer. 
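The generated informers above repeat the usual guidance: prefer a shared informer obtained from a factory over an independent one. A short, illustrative sketch of the factory pattern with the new admissionregistration v1alpha1 informer, assuming an in-cluster rest.Config:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Assumed setup: running inside a cluster; any rest.Config would do.
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// One factory shares informers (and their watches) across all callers.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	initializers := factory.Admissionregistration().V1alpha1().InitializerConfigurations()

	initializers.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("initializer configuration added") },
	})

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	factory.WaitForCacheSync(stop)
}
```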
+func (v *version) InitializerConfigurations() InitializerConfigurationInformer { + return &initializerConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go new file mode 100644 index 00000000..4f08d69a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/interface.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. + MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer + // ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// MutatingWebhookConfigurations returns a MutatingWebhookConfigurationInformer. +func (v *version) MutatingWebhookConfigurations() MutatingWebhookConfigurationInformer { + return &mutatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ValidatingWebhookConfigurations returns a ValidatingWebhookConfigurationInformer. +func (v *version) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInformer { + return &validatingWebhookConfigurationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..31a2a865 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// MutatingWebhookConfigurationInformer provides access to a shared informer and lister for +// MutatingWebhookConfigurations. +type MutatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.MutatingWebhookConfigurationLister +} + +type mutatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredMutatingWebhookConfigurationInformer constructs a new informer for MutatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredMutatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().MutatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration_v1beta1.MutatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *mutatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredMutatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *mutatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1beta1.MutatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *mutatingWebhookConfigurationInformer) Lister() v1beta1.MutatingWebhookConfigurationLister { + return v1beta1.NewMutatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..d87ab900 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + admissionregistration_v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/admissionregistration/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ValidatingWebhookConfigurationInformer provides access to a shared informer and lister for +// ValidatingWebhookConfigurations. 
+type ValidatingWebhookConfigurationInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ValidatingWebhookConfigurationLister +} + +type validatingWebhookConfigurationInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredValidatingWebhookConfigurationInformer constructs a new informer for ValidatingWebhookConfiguration type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredValidatingWebhookConfigurationInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AdmissionregistrationV1beta1().ValidatingWebhookConfigurations().Watch(options) + }, + }, + &admissionregistration_v1beta1.ValidatingWebhookConfiguration{}, + resyncPeriod, + indexers, + ) +} + +func (f *validatingWebhookConfigurationInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredValidatingWebhookConfigurationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *validatingWebhookConfigurationInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&admissionregistration_v1beta1.ValidatingWebhookConfiguration{}, f.defaultInformer) +} + +func (f *validatingWebhookConfigurationInformer) Lister() v1beta1.ValidatingWebhookConfigurationLister { + return v1beta1.NewValidatingWebhookConfigurationLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/interface.go b/vendor/k8s.io/client-go/informers/apps/interface.go new file mode 100644 index 00000000..fdd32de0 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package apps + +import ( + v1 "k8s.io/client-go/informers/apps/v1" + v1beta1 "k8s.io/client-go/informers/apps/v1beta1" + v1beta2 "k8s.io/client-go/informers/apps/v1beta2" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface + // V1beta2 provides access to shared informers for resources in V1beta2. + V1beta2() v1beta2.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta2 returns a new v1beta2.Interface. +func (g *group) V1beta2() v1beta2.Interface { + return v1beta2.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go new file mode 100644 index 00000000..a69be9c7 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. +type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1.ControllerRevisionLister { + return v1.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go new file mode 100644 index 00000000..1c7abf7d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. 
+type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().DaemonSets(namespace).Watch(options) + }, + }, + &apps_v1.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1.DaemonSetLister { + return v1.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go new file mode 100644 index 00000000..9f6beed6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. +type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().Deployments(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().Deployments(namespace).Watch(options) + }, + }, + &apps_v1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1.DeploymentLister { + return v1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1/interface.go new file mode 100644 index 00000000..6145fd6c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. +func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go new file mode 100644 index 00000000..1ac50607 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().ReplicaSets(namespace).Watch(options) + }, + }, + &apps_v1.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1.ReplicaSetLister { + return v1.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go new file mode 100644 index 00000000..535790df --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + apps_v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/apps/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1.StatefulSetLister { + return v1.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go new file mode 100644 index 00000000..1e2de416 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. +type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1beta1.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta1.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1beta1.ControllerRevisionLister { + return v1beta1.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go new file mode 100644 index 00000000..4d2dea57 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. 
+type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().Deployments(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().Deployments(namespace).Watch(options) + }, + }, + &apps_v1beta1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { + return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go new file mode 100644 index 00000000..3a51a1f5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/interface.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. 
+type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. +func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go new file mode 100644 index 00000000..779ae2c6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta1/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/apps/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta1().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1beta1.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta1.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1beta1.StatefulSetLister { + return v1beta1.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go new file mode 100644 index 00000000..a7d55ab4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/controllerrevision.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ControllerRevisionInformer provides access to a shared informer and lister for +// ControllerRevisions. 
+type ControllerRevisionInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.ControllerRevisionLister +} + +type controllerRevisionInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredControllerRevisionInformer constructs a new informer for ControllerRevision type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredControllerRevisionInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ControllerRevisions(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ControllerRevisions(namespace).Watch(options) + }, + }, + &apps_v1beta2.ControllerRevision{}, + resyncPeriod, + indexers, + ) +} + +func (f *controllerRevisionInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredControllerRevisionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *controllerRevisionInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.ControllerRevision{}, f.defaultInformer) +} + +func (f *controllerRevisionInformer) Lister() v1beta2.ControllerRevisionLister { + return v1beta2.NewControllerRevisionLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go new file mode 100644 index 00000000..5d328802 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. +type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().DaemonSets(namespace).Watch(options) + }, + }, + &apps_v1beta2.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1beta2.DaemonSetLister { + return v1beta2.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go new file mode 100644 index 00000000..6b6cd603 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. +type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().Deployments(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().Deployments(namespace).Watch(options) + }, + }, + &apps_v1beta2.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1beta2.DeploymentLister { + return v1beta2.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go new file mode 100644 index 00000000..59a6e73d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ControllerRevisions returns a ControllerRevisionInformer. + ControllerRevisions() ControllerRevisionInformer + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer + // StatefulSets returns a StatefulSetInformer. + StatefulSets() StatefulSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ControllerRevisions returns a ControllerRevisionInformer. 
+func (v *version) ControllerRevisions() ControllerRevisionInformer { + return &controllerRevisionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// StatefulSets returns a StatefulSetInformer. +func (v *version) StatefulSets() StatefulSetInformer { + return &statefulSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go new file mode 100644 index 00000000..988a3e4f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().ReplicaSets(namespace).Watch(options) + }, + }, + &apps_v1beta2.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1beta2.ReplicaSetLister { + return v1beta2.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go new file mode 100644 index 00000000..dff9c240 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/apps/v1beta2/statefulset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta2 + +import ( + apps_v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta2 "k8s.io/client-go/listers/apps/v1beta2" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StatefulSetInformer provides access to a shared informer and lister for +// StatefulSets. +type StatefulSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta2.StatefulSetLister +} + +type statefulSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredStatefulSetInformer constructs a new informer for StatefulSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredStatefulSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().StatefulSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AppsV1beta2().StatefulSets(namespace).Watch(options) + }, + }, + &apps_v1beta2.StatefulSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *statefulSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStatefulSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *statefulSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&apps_v1beta2.StatefulSet{}, f.defaultInformer) +} + +func (f *statefulSetInformer) Lister() v1beta2.StatefulSetLister { + return v1beta2.NewStatefulSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/interface.go new file mode 100644 index 00000000..63a5c0cc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/interface.go @@ -0,0 +1,54 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package autoscaling + +import ( + v1 "k8s.io/client-go/informers/autoscaling/v1" + v2beta1 "k8s.io/client-go/informers/autoscaling/v2beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V2beta1 provides access to shared informers for resources in V2beta1. + V2beta1() v2beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V2beta1 returns a new v2beta1.Interface. +func (g *group) V2beta1() v2beta1.Interface { + return v2beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 00000000..7d875e73 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + autoscaling_v1 "k8s.io/api/autoscaling/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/autoscaling/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for +// HorizontalPodAutoscalers. +type HorizontalPodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.HorizontalPodAutoscalerLister +} + +type horizontalPodAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV1().HorizontalPodAutoscalers(namespace).Watch(options) + }, + }, + &autoscaling_v1.HorizontalPodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscaling_v1.HorizontalPodAutoscaler{}, f.defaultInformer) +} + +func (f *horizontalPodAutoscalerInformer) Lister() v1.HorizontalPodAutoscalerLister { + return v1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go new file mode 100644 index 00000000..5ba90701 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. + HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
+func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go new file mode 100644 index 00000000..9865f8e1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2beta1 + +import ( + autoscaling_v2beta1 "k8s.io/api/autoscaling/v2beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v2beta1 "k8s.io/client-go/listers/autoscaling/v2beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// HorizontalPodAutoscalerInformer provides access to a shared informer and lister for +// HorizontalPodAutoscalers. +type HorizontalPodAutoscalerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2beta1.HorizontalPodAutoscalerLister +} + +type horizontalPodAutoscalerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredHorizontalPodAutoscalerInformer constructs a new informer for HorizontalPodAutoscaler type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredHorizontalPodAutoscalerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.AutoscalingV2beta1().HorizontalPodAutoscalers(namespace).Watch(options) + }, + }, + &autoscaling_v2beta1.HorizontalPodAutoscaler{}, + resyncPeriod, + indexers, + ) +} + +func (f *horizontalPodAutoscalerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredHorizontalPodAutoscalerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *horizontalPodAutoscalerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&autoscaling_v2beta1.HorizontalPodAutoscaler{}, f.defaultInformer) +} + +func (f *horizontalPodAutoscalerInformer) Lister() v2beta1.HorizontalPodAutoscalerLister { + return v2beta1.NewHorizontalPodAutoscalerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go new file mode 100644 index 00000000..4c9ea849 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/autoscaling/v2beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. + HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// HorizontalPodAutoscalers returns a HorizontalPodAutoscalerInformer. 
+func (v *version) HorizontalPodAutoscalers() HorizontalPodAutoscalerInformer { + return &horizontalPodAutoscalerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/batch/interface.go b/vendor/k8s.io/client-go/informers/batch/interface.go new file mode 100644 index 00000000..bbaec796 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package batch + +import ( + v1 "k8s.io/client-go/informers/batch/v1" + v1beta1 "k8s.io/client-go/informers/batch/v1beta1" + v2alpha1 "k8s.io/client-go/informers/batch/v2alpha1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface + // V2alpha1 provides access to shared informers for resources in V2alpha1. + V2alpha1() v2alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V2alpha1 returns a new v2alpha1.Interface. +func (g *group) V2alpha1() v2alpha1.Interface { + return v2alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1/interface.go new file mode 100644 index 00000000..41c08ea2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Jobs returns a JobInformer. + Jobs() JobInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Jobs returns a JobInformer. +func (v *version) Jobs() JobInformer { + return &jobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1/job.go b/vendor/k8s.io/client-go/informers/batch/v1/job.go new file mode 100644 index 00000000..8a2e5f0d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1/job.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + batch_v1 "k8s.io/api/batch/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/batch/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// JobInformer provides access to a shared informer and lister for +// Jobs. +type JobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.JobLister +} + +type jobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewJobInformer constructs a new informer for Job type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredJobInformer constructs a new informer for Job type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1().Jobs(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1().Jobs(namespace).Watch(options) + }, + }, + &batch_v1.Job{}, + resyncPeriod, + indexers, + ) +} + +func (f *jobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *jobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&batch_v1.Job{}, f.defaultInformer) +} + +func (f *jobInformer) Lister() v1.JobLister { + return v1.NewJobLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go new file mode 100644 index 00000000..4edfd415 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/cronjob.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + batch_v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/batch/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// CronJobInformer provides access to a shared informer and lister for +// CronJobs. +type CronJobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CronJobLister +} + +type cronJobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1beta1().CronJobs(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV1beta1().CronJobs(namespace).Watch(options) + }, + }, + &batch_v1beta1.CronJob{}, + resyncPeriod, + indexers, + ) +} + +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cronJobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&batch_v1beta1.CronJob{}, f.defaultInformer) +} + +func (f *cronJobInformer) Lister() v1beta1.CronJobLister { + return v1beta1.NewCronJobLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go new file mode 100644 index 00000000..0ba1935d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CronJobs returns a CronJobInformer. + CronJobs() CronJobInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CronJobs returns a CronJobInformer. 
+func (v *version) CronJobs() CronJobInformer { + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go new file mode 100644 index 00000000..03a6e6f8 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/cronjob.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2alpha1 + +import ( + batch_v2alpha1 "k8s.io/api/batch/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v2alpha1 "k8s.io/client-go/listers/batch/v2alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// CronJobInformer provides access to a shared informer and lister for +// CronJobs. +type CronJobInformer interface { + Informer() cache.SharedIndexInformer + Lister() v2alpha1.CronJobLister +} + +type cronJobInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredCronJobInformer constructs a new informer for CronJob type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredCronJobInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV2alpha1().CronJobs(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.BatchV2alpha1().CronJobs(namespace).Watch(options) + }, + }, + &batch_v2alpha1.CronJob{}, + resyncPeriod, + indexers, + ) +} + +func (f *cronJobInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCronJobInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *cronJobInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&batch_v2alpha1.CronJob{}, f.defaultInformer) +} + +func (f *cronJobInformer) Lister() v2alpha1.CronJobLister { + return v2alpha1.NewCronJobLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go b/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go new file mode 100644 index 00000000..39b6f33f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/batch/v2alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v2alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CronJobs returns a CronJobInformer. + CronJobs() CronJobInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CronJobs returns a CronJobInformer. +func (v *version) CronJobs() CronJobInformer { + return &cronJobInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/certificates/interface.go b/vendor/k8s.io/client-go/informers/certificates/interface.go new file mode 100644 index 00000000..1eefe479 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package certificates + +import ( + v1beta1 "k8s.io/client-go/informers/certificates/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 00000000..44aac5c7 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + certificates_v1beta1 "k8s.io/api/certificates/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/certificates/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// CertificateSigningRequestInformer provides access to a shared informer and lister for +// CertificateSigningRequests. +type CertificateSigningRequestInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.CertificateSigningRequestLister +} + +type certificateSigningRequestInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredCertificateSigningRequestInformer constructs a new informer for CertificateSigningRequest type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredCertificateSigningRequestInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1beta1().CertificateSigningRequests().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CertificatesV1beta1().CertificateSigningRequests().Watch(options) + }, + }, + &certificates_v1beta1.CertificateSigningRequest{}, + resyncPeriod, + indexers, + ) +} + +func (f *certificateSigningRequestInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredCertificateSigningRequestInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *certificateSigningRequestInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&certificates_v1beta1.CertificateSigningRequest{}, f.defaultInformer) +} + +func (f *certificateSigningRequestInformer) Lister() v1beta1.CertificateSigningRequestLister { + return v1beta1.NewCertificateSigningRequestLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go new file mode 100644 index 00000000..8578023c --- /dev/null +++ b/vendor/k8s.io/client-go/informers/certificates/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // CertificateSigningRequests returns a CertificateSigningRequestInformer. 
+ CertificateSigningRequests() CertificateSigningRequestInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// CertificateSigningRequests returns a CertificateSigningRequestInformer. +func (v *version) CertificateSigningRequests() CertificateSigningRequestInformer { + return &certificateSigningRequestInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/core/interface.go b/vendor/k8s.io/client-go/informers/core/interface.go new file mode 100644 index 00000000..7fc2a5cd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package core + +import ( + v1 "k8s.io/client-go/informers/core/v1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go new file mode 100644 index 00000000..77b17fd3 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/componentstatus.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ComponentStatusInformer provides access to a shared informer and lister for +// ComponentStatuses. +type ComponentStatusInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ComponentStatusLister +} + +type componentStatusInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredComponentStatusInformer constructs a new informer for ComponentStatus type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredComponentStatusInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ComponentStatuses().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ComponentStatuses().Watch(options) + }, + }, + &core_v1.ComponentStatus{}, + resyncPeriod, + indexers, + ) +} + +func (f *componentStatusInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredComponentStatusInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *componentStatusInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ComponentStatus{}, f.defaultInformer) +} + +func (f *componentStatusInformer) Lister() v1.ComponentStatusLister { + return v1.NewComponentStatusLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/configmap.go b/vendor/k8s.io/client-go/informers/core/v1/configmap.go new file mode 100644 index 00000000..ed0f4c2d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/configmap.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ConfigMapInformer provides access to a shared informer and lister for +// ConfigMaps. +type ConfigMapInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ConfigMapLister +} + +type configMapInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredConfigMapInformer constructs a new informer for ConfigMap type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredConfigMapInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ConfigMaps(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ConfigMaps(namespace).Watch(options) + }, + }, + &core_v1.ConfigMap{}, + resyncPeriod, + indexers, + ) +} + +func (f *configMapInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredConfigMapInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *configMapInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ConfigMap{}, f.defaultInformer) +} + +func (f *configMapInformer) Lister() v1.ConfigMapLister { + return v1.NewConfigMapLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/endpoints.go b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go new file mode 100644 index 00000000..8a7228ba --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/endpoints.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EndpointsInformer provides access to a shared informer and lister for +// Endpoints. +type EndpointsInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EndpointsLister +} + +type endpointsInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEndpointsInformer constructs a new informer for Endpoints type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEndpointsInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Endpoints(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Endpoints(namespace).Watch(options) + }, + }, + &core_v1.Endpoints{}, + resyncPeriod, + indexers, + ) +} + +func (f *endpointsInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEndpointsInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *endpointsInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Endpoints{}, f.defaultInformer) +} + +func (f *endpointsInformer) Lister() v1.EndpointsLister { + return v1.NewEndpointsLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/event.go b/vendor/k8s.io/client-go/informers/core/v1/event.go new file mode 100644 index 00000000..23f5ead6 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/event.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EventInformer provides access to a shared informer and lister for +// Events. +type EventInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.EventLister +} + +type eventInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEventInformer constructs a new informer for Event type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Events(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Events(namespace).Watch(options) + }, + }, + &core_v1.Event{}, + resyncPeriod, + indexers, + ) +} + +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *eventInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Event{}, f.defaultInformer) +} + +func (f *eventInformer) Lister() v1.EventLister { + return v1.NewEventLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/interface.go b/vendor/k8s.io/client-go/informers/core/v1/interface.go new file mode 100644 index 00000000..e560b12f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/interface.go @@ -0,0 +1,150 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ComponentStatuses returns a ComponentStatusInformer. + ComponentStatuses() ComponentStatusInformer + // ConfigMaps returns a ConfigMapInformer. + ConfigMaps() ConfigMapInformer + // Endpoints returns a EndpointsInformer. + Endpoints() EndpointsInformer + // Events returns a EventInformer. + Events() EventInformer + // LimitRanges returns a LimitRangeInformer. + LimitRanges() LimitRangeInformer + // Namespaces returns a NamespaceInformer. 
+ Namespaces() NamespaceInformer + // Nodes returns a NodeInformer. + Nodes() NodeInformer + // PersistentVolumes returns a PersistentVolumeInformer. + PersistentVolumes() PersistentVolumeInformer + // PersistentVolumeClaims returns a PersistentVolumeClaimInformer. + PersistentVolumeClaims() PersistentVolumeClaimInformer + // Pods returns a PodInformer. + Pods() PodInformer + // PodTemplates returns a PodTemplateInformer. + PodTemplates() PodTemplateInformer + // ReplicationControllers returns a ReplicationControllerInformer. + ReplicationControllers() ReplicationControllerInformer + // ResourceQuotas returns a ResourceQuotaInformer. + ResourceQuotas() ResourceQuotaInformer + // Secrets returns a SecretInformer. + Secrets() SecretInformer + // Services returns a ServiceInformer. + Services() ServiceInformer + // ServiceAccounts returns a ServiceAccountInformer. + ServiceAccounts() ServiceAccountInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ComponentStatuses returns a ComponentStatusInformer. +func (v *version) ComponentStatuses() ComponentStatusInformer { + return &componentStatusInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ConfigMaps returns a ConfigMapInformer. +func (v *version) ConfigMaps() ConfigMapInformer { + return &configMapInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Endpoints returns a EndpointsInformer. +func (v *version) Endpoints() EndpointsInformer { + return &endpointsInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Events returns a EventInformer. +func (v *version) Events() EventInformer { + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// LimitRanges returns a LimitRangeInformer. +func (v *version) LimitRanges() LimitRangeInformer { + return &limitRangeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Namespaces returns a NamespaceInformer. +func (v *version) Namespaces() NamespaceInformer { + return &namespaceInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Nodes returns a NodeInformer. +func (v *version) Nodes() NodeInformer { + return &nodeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PersistentVolumes returns a PersistentVolumeInformer. +func (v *version) PersistentVolumes() PersistentVolumeInformer { + return &persistentVolumeInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// PersistentVolumeClaims returns a PersistentVolumeClaimInformer. +func (v *version) PersistentVolumeClaims() PersistentVolumeClaimInformer { + return &persistentVolumeClaimInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Pods returns a PodInformer. +func (v *version) Pods() PodInformer { + return &podInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodTemplates returns a PodTemplateInformer. 
+func (v *version) PodTemplates() PodTemplateInformer { + return &podTemplateInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ReplicationControllers returns a ReplicationControllerInformer. +func (v *version) ReplicationControllers() ReplicationControllerInformer { + return &replicationControllerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ResourceQuotas returns a ResourceQuotaInformer. +func (v *version) ResourceQuotas() ResourceQuotaInformer { + return &resourceQuotaInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Secrets returns a SecretInformer. +func (v *version) Secrets() SecretInformer { + return &secretInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Services returns a ServiceInformer. +func (v *version) Services() ServiceInformer { + return &serviceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// ServiceAccounts returns a ServiceAccountInformer. +func (v *version) ServiceAccounts() ServiceAccountInformer { + return &serviceAccountInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/limitrange.go b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go new file mode 100644 index 00000000..9588b940 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/limitrange.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// LimitRangeInformer provides access to a shared informer and lister for +// LimitRanges. +type LimitRangeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.LimitRangeLister +} + +type limitRangeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredLimitRangeInformer constructs a new informer for LimitRange type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredLimitRangeInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().LimitRanges(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().LimitRanges(namespace).Watch(options) + }, + }, + &core_v1.LimitRange{}, + resyncPeriod, + indexers, + ) +} + +func (f *limitRangeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredLimitRangeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *limitRangeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.LimitRange{}, f.defaultInformer) +} + +func (f *limitRangeInformer) Lister() v1.LimitRangeLister { + return v1.NewLimitRangeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/namespace.go b/vendor/k8s.io/client-go/informers/core/v1/namespace.go new file mode 100644 index 00000000..eb841b15 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/namespace.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// NamespaceInformer provides access to a shared informer and lister for +// Namespaces. 
+type NamespaceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NamespaceLister +} + +type namespaceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNamespaceInformer constructs a new informer for Namespace type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNamespaceInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Namespaces().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Namespaces().Watch(options) + }, + }, + &core_v1.Namespace{}, + resyncPeriod, + indexers, + ) +} + +func (f *namespaceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNamespaceInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *namespaceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Namespace{}, f.defaultInformer) +} + +func (f *namespaceInformer) Lister() v1.NamespaceLister { + return v1.NewNamespaceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/node.go b/vendor/k8s.io/client-go/informers/core/v1/node.go new file mode 100644 index 00000000..3c70e52b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/node.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// NodeInformer provides access to a shared informer and lister for +// Nodes. +type NodeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NodeLister +} + +type nodeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredNodeInformer constructs a new informer for Node type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNodeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Nodes().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Nodes().Watch(options) + }, + }, + &core_v1.Node{}, + resyncPeriod, + indexers, + ) +} + +func (f *nodeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNodeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *nodeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Node{}, f.defaultInformer) +} + +func (f *nodeInformer) Lister() v1.NodeLister { + return v1.NewNodeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go new file mode 100644 index 00000000..e944560f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolume.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PersistentVolumeInformer provides access to a shared informer and lister for +// PersistentVolumes. +type PersistentVolumeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PersistentVolumeLister +} + +type persistentVolumeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeInformer constructs a new informer for PersistentVolume type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPersistentVolumeInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumes().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumes().Watch(options) + }, + }, + &core_v1.PersistentVolume{}, + resyncPeriod, + indexers, + ) +} + +func (f *persistentVolumeInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *persistentVolumeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.PersistentVolume{}, f.defaultInformer) +} + +func (f *persistentVolumeInformer) Lister() v1.PersistentVolumeLister { + return v1.NewPersistentVolumeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go new file mode 100644 index 00000000..136884d4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PersistentVolumeClaimInformer provides access to a shared informer and lister for +// PersistentVolumeClaims. +type PersistentVolumeClaimInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PersistentVolumeClaimLister +} + +type persistentVolumeClaimInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPersistentVolumeClaimInformer constructs a new informer for PersistentVolumeClaim type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredPersistentVolumeClaimInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumeClaims(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PersistentVolumeClaims(namespace).Watch(options) + }, + }, + &core_v1.PersistentVolumeClaim{}, + resyncPeriod, + indexers, + ) +} + +func (f *persistentVolumeClaimInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPersistentVolumeClaimInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *persistentVolumeClaimInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.PersistentVolumeClaim{}, f.defaultInformer) +} + +func (f *persistentVolumeClaimInformer) Lister() v1.PersistentVolumeClaimLister { + return v1.NewPersistentVolumeClaimLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/pod.go b/vendor/k8s.io/client-go/informers/core/v1/pod.go new file mode 100644 index 00000000..b9720829 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/pod.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodInformer provides access to a shared informer and lister for +// Pods. +type PodInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodLister +} + +type podInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodInformer constructs a new informer for Pod type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Pods(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Pods(namespace).Watch(options) + }, + }, + &core_v1.Pod{}, + resyncPeriod, + indexers, + ) +} + +func (f *podInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Pod{}, f.defaultInformer) +} + +func (f *podInformer) Lister() v1.PodLister { + return v1.NewPodLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go new file mode 100644 index 00000000..c0575385 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/podtemplate.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodTemplateInformer provides access to a shared informer and lister for +// PodTemplates. +type PodTemplateInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.PodTemplateLister +} + +type podTemplateInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodTemplateInformer constructs a new informer for PodTemplate type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodTemplateInformer constructs a new informer for PodTemplate type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodTemplateInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PodTemplates(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().PodTemplates(namespace).Watch(options) + }, + }, + &core_v1.PodTemplate{}, + resyncPeriod, + indexers, + ) +} + +func (f *podTemplateInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodTemplateInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podTemplateInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.PodTemplate{}, f.defaultInformer) +} + +func (f *podTemplateInformer) Lister() v1.PodTemplateLister { + return v1.NewPodTemplateLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go new file mode 100644 index 00000000..e04cd146 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/replicationcontroller.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicationControllerInformer provides access to a shared informer and lister for +// ReplicationControllers. 
+type ReplicationControllerInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ReplicationControllerLister +} + +type replicationControllerInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicationControllerInformer constructs a new informer for ReplicationController type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredReplicationControllerInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ReplicationControllers(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ReplicationControllers(namespace).Watch(options) + }, + }, + &core_v1.ReplicationController{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicationControllerInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicationControllerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicationControllerInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ReplicationController{}, f.defaultInformer) +} + +func (f *replicationControllerInformer) Lister() v1.ReplicationControllerLister { + return v1.NewReplicationControllerLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go new file mode 100644 index 00000000..3ef4f4c1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/resourcequota.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ResourceQuotaInformer provides access to a shared informer and lister for +// ResourceQuotas. +type ResourceQuotaInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ResourceQuotaLister +} + +type resourceQuotaInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredResourceQuotaInformer constructs a new informer for ResourceQuota type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredResourceQuotaInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ResourceQuotas(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ResourceQuotas(namespace).Watch(options) + }, + }, + &core_v1.ResourceQuota{}, + resyncPeriod, + indexers, + ) +} + +func (f *resourceQuotaInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredResourceQuotaInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *resourceQuotaInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ResourceQuota{}, f.defaultInformer) +} + +func (f *resourceQuotaInformer) Lister() v1.ResourceQuotaLister { + return v1.NewResourceQuotaLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/secret.go b/vendor/k8s.io/client-go/informers/core/v1/secret.go new file mode 100644 index 00000000..7bc6395a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/secret.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// SecretInformer provides access to a shared informer and lister for +// Secrets. +type SecretInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.SecretLister +} + +type secretInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredSecretInformer constructs a new informer for Secret type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredSecretInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Secrets(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Secrets(namespace).Watch(options) + }, + }, + &core_v1.Secret{}, + resyncPeriod, + indexers, + ) +} + +func (f *secretInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredSecretInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *secretInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Secret{}, f.defaultInformer) +} + +func (f *secretInformer) Lister() v1.SecretLister { + return v1.NewSecretLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/service.go b/vendor/k8s.io/client-go/informers/core/v1/service.go new file mode 100644 index 00000000..d1b5ed02 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/service.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ServiceInformer provides access to a shared informer and lister for +// Services. +type ServiceInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceLister +} + +type serviceInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceInformer constructs a new informer for Service type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredServiceInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Services(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().Services(namespace).Watch(options) + }, + }, + &core_v1.Service{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.Service{}, f.defaultInformer) +} + +func (f *serviceInformer) Lister() v1.ServiceLister { + return v1.NewServiceLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go new file mode 100644 index 00000000..fb9c50aa --- /dev/null +++ b/vendor/k8s.io/client-go/informers/core/v1/serviceaccount.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + core_v1 "k8s.io/api/core/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/core/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ServiceAccountInformer provides access to a shared informer and lister for +// ServiceAccounts. +type ServiceAccountInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ServiceAccountLister +} + +type serviceAccountInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredServiceAccountInformer constructs a new informer for ServiceAccount type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredServiceAccountInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ServiceAccounts(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.CoreV1().ServiceAccounts(namespace).Watch(options) + }, + }, + &core_v1.ServiceAccount{}, + resyncPeriod, + indexers, + ) +} + +func (f *serviceAccountInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredServiceAccountInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&core_v1.ServiceAccount{}, f.defaultInformer) +} + +func (f *serviceAccountInformer) Lister() v1.ServiceAccountLister { + return v1.NewServiceAccountLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/events/interface.go b/vendor/k8s.io/client-go/informers/events/interface.go new file mode 100644 index 00000000..81f6646f --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package events + +import ( + v1beta1 "k8s.io/client-go/informers/events/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/event.go b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go new file mode 100644 index 00000000..d604b4cf --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/event.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + events_v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/events/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// EventInformer provides access to a shared informer and lister for +// Events. +type EventInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.EventLister +} + +type eventInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredEventInformer constructs a new informer for Event type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredEventInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventsV1beta1().Events(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.EventsV1beta1().Events(namespace).Watch(options) + }, + }, + &events_v1beta1.Event{}, + resyncPeriod, + indexers, + ) +} + +func (f *eventInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredEventInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *eventInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&events_v1beta1.Event{}, f.defaultInformer) +} + +func (f *eventInformer) Lister() v1beta1.EventLister { + return v1beta1.NewEventLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go new file mode 100644 index 00000000..d079afed --- /dev/null +++ b/vendor/k8s.io/client-go/informers/events/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // Events returns a EventInformer. + Events() EventInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// Events returns a EventInformer. +func (v *version) Events() EventInformer { + return &eventInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/interface.go b/vendor/k8s.io/client-go/informers/extensions/interface.go new file mode 100644 index 00000000..a6bfc3b4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package extensions + +import ( + v1beta1 "k8s.io/client-go/informers/extensions/v1beta1" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go new file mode 100644 index 00000000..c64b14c3 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/daemonset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DaemonSetInformer provides access to a shared informer and lister for +// DaemonSets. +type DaemonSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.DaemonSetLister +} + +type daemonSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDaemonSetInformer constructs a new informer for DaemonSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDaemonSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().DaemonSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().DaemonSets(namespace).Watch(options) + }, + }, + &extensions_v1beta1.DaemonSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *daemonSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDaemonSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *daemonSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.DaemonSet{}, f.defaultInformer) +} + +func (f *daemonSetInformer) Lister() v1beta1.DaemonSetLister { + return v1beta1.NewDaemonSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go new file mode 100644 index 00000000..4bcfc5c2 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/deployment.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// DeploymentInformer provides access to a shared informer and lister for +// Deployments. 
+type DeploymentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.DeploymentLister +} + +type deploymentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredDeploymentInformer constructs a new informer for Deployment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredDeploymentInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Deployments(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Deployments(namespace).Watch(options) + }, + }, + &extensions_v1beta1.Deployment{}, + resyncPeriod, + indexers, + ) +} + +func (f *deploymentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredDeploymentInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *deploymentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.Deployment{}, f.defaultInformer) +} + +func (f *deploymentInformer) Lister() v1beta1.DeploymentLister { + return v1beta1.NewDeploymentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go new file mode 100644 index 00000000..22dac92b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/ingress.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// IngressInformer provides access to a shared informer and lister for +// Ingresses. +type IngressInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.IngressLister +} + +type ingressInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredIngressInformer constructs a new informer for Ingress type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredIngressInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Ingresses(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().Ingresses(namespace).Watch(options) + }, + }, + &extensions_v1beta1.Ingress{}, + resyncPeriod, + indexers, + ) +} + +func (f *ingressInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredIngressInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *ingressInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.Ingress{}, f.defaultInformer) +} + +func (f *ingressInformer) Lister() v1beta1.IngressLister { + return v1beta1.NewIngressLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go new file mode 100644 index 00000000..ce060e0d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/interface.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // DaemonSets returns a DaemonSetInformer. + DaemonSets() DaemonSetInformer + // Deployments returns a DeploymentInformer. + Deployments() DeploymentInformer + // Ingresses returns a IngressInformer. + Ingresses() IngressInformer + // PodSecurityPolicies returns a PodSecurityPolicyInformer. + PodSecurityPolicies() PodSecurityPolicyInformer + // ReplicaSets returns a ReplicaSetInformer. + ReplicaSets() ReplicaSetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// DaemonSets returns a DaemonSetInformer. +func (v *version) DaemonSets() DaemonSetInformer { + return &daemonSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Deployments returns a DeploymentInformer. +func (v *version) Deployments() DeploymentInformer { + return &deploymentInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// Ingresses returns a IngressInformer. +func (v *version) Ingresses() IngressInformer { + return &ingressInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// PodSecurityPolicies returns a PodSecurityPolicyInformer. +func (v *version) PodSecurityPolicies() PodSecurityPolicyInformer { + return &podSecurityPolicyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ReplicaSets returns a ReplicaSetInformer. +func (v *version) ReplicaSets() ReplicaSetInformer { + return &replicaSetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000..18ef2735 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodSecurityPolicyInformer provides access to a shared informer and lister for +// PodSecurityPolicies. +type PodSecurityPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PodSecurityPolicyLister +} + +type podSecurityPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPodSecurityPolicyInformer constructs a new informer for PodSecurityPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodSecurityPolicyInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().PodSecurityPolicies().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().PodSecurityPolicies().Watch(options) + }, + }, + &extensions_v1beta1.PodSecurityPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *podSecurityPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodSecurityPolicyInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podSecurityPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.PodSecurityPolicy{}, f.defaultInformer) +} + +func (f *podSecurityPolicyInformer) Lister() v1beta1.PodSecurityPolicyLister { + return v1beta1.NewPodSecurityPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go new file mode 100644 index 00000000..856cb30b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/extensions/v1beta1/replicaset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/extensions/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ReplicaSetInformer provides access to a shared informer and lister for +// ReplicaSets. +type ReplicaSetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ReplicaSetLister +} + +type replicaSetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredReplicaSetInformer constructs a new informer for ReplicaSet type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
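// --- Illustrative usage sketch (not part of the generated replicaset.go above) ---
// The per-type constructors can also be called directly when a shared factory is
// not wanted, e.g. in a short-lived command. The helper below is hypothetical; it
// assumes a ready kubernetes.Interface and uses only the NewReplicaSetInformer
// signature defined in this file. As the generated comment above notes, prefer the
// factory for long-lived controllers so caches and watch connections are shared.

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extensionsinformers "k8s.io/client-go/informers/extensions/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// newStandaloneReplicaSetInformer builds an unshared ReplicaSet informer that
// watches all namespaces and indexes objects by namespace.
func newStandaloneReplicaSetInformer(clientset kubernetes.Interface) cache.SharedIndexInformer {
	return extensionsinformers.NewReplicaSetInformer(
		clientset,
		metav1.NamespaceAll, // empty string, i.e. watch every namespace
		30*time.Second,      // resync period
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
}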
+func NewFilteredReplicaSetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().ReplicaSets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ExtensionsV1beta1().ReplicaSets(namespace).Watch(options) + }, + }, + &extensions_v1beta1.ReplicaSet{}, + resyncPeriod, + indexers, + ) +} + +func (f *replicaSetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredReplicaSetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *replicaSetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&extensions_v1beta1.ReplicaSet{}, f.defaultInformer) +} + +func (f *replicaSetInformer) Lister() v1beta1.ReplicaSetLister { + return v1beta1.NewReplicaSetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/factory.go b/vendor/k8s.io/client-go/informers/factory.go new file mode 100644 index 00000000..e922c127 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/factory.go @@ -0,0 +1,208 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package informers + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + admissionregistration "k8s.io/client-go/informers/admissionregistration" + apps "k8s.io/client-go/informers/apps" + autoscaling "k8s.io/client-go/informers/autoscaling" + batch "k8s.io/client-go/informers/batch" + certificates "k8s.io/client-go/informers/certificates" + core "k8s.io/client-go/informers/core" + events "k8s.io/client-go/informers/events" + extensions "k8s.io/client-go/informers/extensions" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + networking "k8s.io/client-go/informers/networking" + policy "k8s.io/client-go/informers/policy" + rbac "k8s.io/client-go/informers/rbac" + scheduling "k8s.io/client-go/informers/scheduling" + settings "k8s.io/client-go/informers/settings" + storage "k8s.io/client-go/informers/storage" + kubernetes "k8s.io/client-go/kubernetes" + cache "k8s.io/client-go/tools/cache" + reflect "reflect" + sync "sync" + time "time" +) + +type sharedInformerFactory struct { + client kubernetes.Interface + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc + lock sync.Mutex + defaultResync time.Duration + + informers map[reflect.Type]cache.SharedIndexInformer + // startedInformers is used for tracking which informers have been started. + // This allows Start() to be called multiple times safely. + startedInformers map[reflect.Type]bool +} + +// NewSharedInformerFactory constructs a new instance of sharedInformerFactory +func NewSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration) SharedInformerFactory { + return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil) +} + +// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory. +// Listers obtained via this SharedInformerFactory will be subject to the same filters +// as specified here. +func NewFilteredSharedInformerFactory(client kubernetes.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory { + return &sharedInformerFactory{ + client: client, + namespace: namespace, + tweakListOptions: tweakListOptions, + defaultResync: defaultResync, + informers: make(map[reflect.Type]cache.SharedIndexInformer), + startedInformers: make(map[reflect.Type]bool), + } +} + +// Start initializes all requested informers. +func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) { + f.lock.Lock() + defer f.lock.Unlock() + + for informerType, informer := range f.informers { + if !f.startedInformers[informerType] { + go informer.Run(stopCh) + f.startedInformers[informerType] = true + } + } +} + +// WaitForCacheSync waits for all started informers' cache were synced. 
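// --- Illustrative usage sketch (not part of the generated factory.go above) ---
// The factory is typically used like this: construct it once per client, ask it for
// the typed informers and listers you need, then Start it and wait for the initial
// caches to fill. The helper below is hypothetical; it assumes a ready
// kubernetes.Interface and a stop channel supplied by the caller, and it relies
// only on methods defined in this package (Core().V1().Pods(), Start,
// WaitForCacheSync).

import (
	"fmt"
	"time"

	core_v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// runPodInformer wires a shared Pod informer, starts the factory, and reads pods
// from the local cache once it has synced.
func runPodInformer(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(clientset, 30*time.Second)

	podInformer := factory.Core().V1().Pods()
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if pod, ok := obj.(*core_v1.Pod); ok {
				fmt.Println("pod added:", pod.Namespace+"/"+pod.Name)
			}
		},
	})

	factory.Start(stopCh)            // starts every informer requested so far
	factory.WaitForCacheSync(stopCh) // blocks until the initial List is cached

	pods, err := podInformer.Lister().Pods(metav1.NamespaceDefault).List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Println("pods in cache:", len(pods))
	return nil
}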
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool { + informers := func() map[reflect.Type]cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informers := map[reflect.Type]cache.SharedIndexInformer{} + for informerType, informer := range f.informers { + if f.startedInformers[informerType] { + informers[informerType] = informer + } + } + return informers + }() + + res := map[reflect.Type]bool{} + for informType, informer := range informers { + res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced) + } + return res +} + +// InternalInformerFor returns the SharedIndexInformer for obj using an internal +// client. +func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer { + f.lock.Lock() + defer f.lock.Unlock() + + informerType := reflect.TypeOf(obj) + informer, exists := f.informers[informerType] + if exists { + return informer + } + informer = newFunc(f.client, f.defaultResync) + f.informers[informerType] = informer + + return informer +} + +// SharedInformerFactory provides shared informers for resources in all known +// API group versions. +type SharedInformerFactory interface { + internalinterfaces.SharedInformerFactory + ForResource(resource schema.GroupVersionResource) (GenericInformer, error) + WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool + + Admissionregistration() admissionregistration.Interface + Apps() apps.Interface + Autoscaling() autoscaling.Interface + Batch() batch.Interface + Certificates() certificates.Interface + Core() core.Interface + Events() events.Interface + Extensions() extensions.Interface + Networking() networking.Interface + Policy() policy.Interface + Rbac() rbac.Interface + Scheduling() scheduling.Interface + Settings() settings.Interface + Storage() storage.Interface +} + +func (f *sharedInformerFactory) Admissionregistration() admissionregistration.Interface { + return admissionregistration.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Apps() apps.Interface { + return apps.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Autoscaling() autoscaling.Interface { + return autoscaling.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Batch() batch.Interface { + return batch.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Certificates() certificates.Interface { + return certificates.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Core() core.Interface { + return core.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Events() events.Interface { + return events.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Extensions() extensions.Interface { + return extensions.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Networking() networking.Interface { + return networking.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Policy() policy.Interface { + return policy.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Rbac() rbac.Interface { + return rbac.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Scheduling() scheduling.Interface { + return scheduling.New(f, f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Settings() settings.Interface { + return settings.New(f, 
f.namespace, f.tweakListOptions) +} + +func (f *sharedInformerFactory) Storage() storage.Interface { + return storage.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/generic.go b/vendor/k8s.io/client-go/informers/generic.go new file mode 100644 index 00000000..70ed4331 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/generic.go @@ -0,0 +1,254 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package informers + +import ( + "fmt" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/api/apps/v1" + apps_v1beta1 "k8s.io/api/apps/v1beta1" + v1beta2 "k8s.io/api/apps/v1beta2" + autoscaling_v1 "k8s.io/api/autoscaling/v1" + v2beta1 "k8s.io/api/autoscaling/v2beta1" + batch_v1 "k8s.io/api/batch/v1" + batch_v1beta1 "k8s.io/api/batch/v1beta1" + v2alpha1 "k8s.io/api/batch/v2alpha1" + certificates_v1beta1 "k8s.io/api/certificates/v1beta1" + core_v1 "k8s.io/api/core/v1" + events_v1beta1 "k8s.io/api/events/v1beta1" + extensions_v1beta1 "k8s.io/api/extensions/v1beta1" + networking_v1 "k8s.io/api/networking/v1" + policy_v1beta1 "k8s.io/api/policy/v1beta1" + rbac_v1 "k8s.io/api/rbac/v1" + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + scheduling_v1alpha1 "k8s.io/api/scheduling/v1alpha1" + settings_v1alpha1 "k8s.io/api/settings/v1alpha1" + storage_v1 "k8s.io/api/storage/v1" + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" + storage_v1beta1 "k8s.io/api/storage/v1beta1" + schema "k8s.io/apimachinery/pkg/runtime/schema" + cache "k8s.io/client-go/tools/cache" +) + +// GenericInformer is type of SharedIndexInformer which will locate and delegate to other +// sharedInformers based on type +type GenericInformer interface { + Informer() cache.SharedIndexInformer + Lister() cache.GenericLister +} + +type genericInformer struct { + informer cache.SharedIndexInformer + resource schema.GroupResource +} + +// Informer returns the SharedIndexInformer. +func (f *genericInformer) Informer() cache.SharedIndexInformer { + return f.informer +} + +// Lister returns the GenericLister. 
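// --- Illustrative usage sketch (not part of the generated generic.go above) ---
// ForResource, defined just below, is useful when the GroupVersionResource is only
// known at runtime, for example when it comes from configuration. The snippet is
// hypothetical: it assumes an already-constructed SharedInformerFactory and a stop
// channel, and uses only ForResource, Start, and WaitForCacheSync from this package
// together with the generic cache.GenericLister.

import (
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/informers"
)

// listGenerically returns the cached objects for an arbitrary resource.
func listGenerically(factory informers.SharedInformerFactory, stopCh <-chan struct{}) ([]runtime.Object, error) {
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}

	gi, err := factory.ForResource(gvr) // fails for resources without a generated informer
	if err != nil {
		return nil, err
	}

	factory.Start(stopCh)            // start the informer that ForResource registered
	factory.WaitForCacheSync(stopCh) // wait for its cache before reading from the lister

	return gi.Lister().List(labels.Everything())
}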
+func (f *genericInformer) Lister() cache.GenericLister { + return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource) +} + +// ForResource gives generic access to a shared informer of the matching type +// TODO extend this to unknown resources with a client pool +func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) { + switch resource { + // Group=admissionregistration.k8s.io, Version=v1alpha1 + case v1alpha1.SchemeGroupVersion.WithResource("initializerconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1alpha1().InitializerConfigurations().Informer()}, nil + + // Group=admissionregistration.k8s.io, Version=v1beta1 + case v1beta1.SchemeGroupVersion.WithResource("mutatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().MutatingWebhookConfigurations().Informer()}, nil + case v1beta1.SchemeGroupVersion.WithResource("validatingwebhookconfigurations"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Admissionregistration().V1beta1().ValidatingWebhookConfigurations().Informer()}, nil + + // Group=apps, Version=v1 + case v1.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ControllerRevisions().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().DaemonSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().Deployments().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().ReplicaSets().Informer()}, nil + case v1.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1().StatefulSets().Informer()}, nil + + // Group=apps, Version=v1beta1 + case apps_v1beta1.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().ControllerRevisions().Informer()}, nil + case apps_v1beta1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().Deployments().Informer()}, nil + case apps_v1beta1.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta1().StatefulSets().Informer()}, nil + + // Group=apps, Version=v1beta2 + case v1beta2.SchemeGroupVersion.WithResource("controllerrevisions"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().ControllerRevisions().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().DaemonSets().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().Deployments().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: 
f.Apps().V1beta2().ReplicaSets().Informer()}, nil + case v1beta2.SchemeGroupVersion.WithResource("statefulsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Apps().V1beta2().StatefulSets().Informer()}, nil + + // Group=autoscaling, Version=v1 + case autoscaling_v1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V1().HorizontalPodAutoscalers().Informer()}, nil + + // Group=autoscaling, Version=v2beta1 + case v2beta1.SchemeGroupVersion.WithResource("horizontalpodautoscalers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Autoscaling().V2beta1().HorizontalPodAutoscalers().Informer()}, nil + + // Group=batch, Version=v1 + case batch_v1.SchemeGroupVersion.WithResource("jobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1().Jobs().Informer()}, nil + + // Group=batch, Version=v1beta1 + case batch_v1beta1.SchemeGroupVersion.WithResource("cronjobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V1beta1().CronJobs().Informer()}, nil + + // Group=batch, Version=v2alpha1 + case v2alpha1.SchemeGroupVersion.WithResource("cronjobs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Batch().V2alpha1().CronJobs().Informer()}, nil + + // Group=certificates.k8s.io, Version=v1beta1 + case certificates_v1beta1.SchemeGroupVersion.WithResource("certificatesigningrequests"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Certificates().V1beta1().CertificateSigningRequests().Informer()}, nil + + // Group=core, Version=v1 + case core_v1.SchemeGroupVersion.WithResource("componentstatuses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ComponentStatuses().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("configmaps"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ConfigMaps().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("endpoints"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Endpoints().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("events"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Events().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("limitranges"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().LimitRanges().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("namespaces"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Namespaces().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("nodes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Nodes().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("persistentvolumes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PersistentVolumes().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("persistentvolumeclaims"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PersistentVolumeClaims().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("pods"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Pods().Informer()}, nil + case 
core_v1.SchemeGroupVersion.WithResource("podtemplates"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().PodTemplates().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("replicationcontrollers"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ReplicationControllers().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("resourcequotas"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ResourceQuotas().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("secrets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Secrets().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("services"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().Services().Informer()}, nil + case core_v1.SchemeGroupVersion.WithResource("serviceaccounts"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Core().V1().ServiceAccounts().Informer()}, nil + + // Group=events.k8s.io, Version=v1beta1 + case events_v1beta1.SchemeGroupVersion.WithResource("events"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Events().V1beta1().Events().Informer()}, nil + + // Group=extensions, Version=v1beta1 + case extensions_v1beta1.SchemeGroupVersion.WithResource("daemonsets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().DaemonSets().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("deployments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Deployments().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("ingresses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().Ingresses().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("podsecuritypolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().PodSecurityPolicies().Informer()}, nil + case extensions_v1beta1.SchemeGroupVersion.WithResource("replicasets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Extensions().V1beta1().ReplicaSets().Informer()}, nil + + // Group=networking.k8s.io, Version=v1 + case networking_v1.SchemeGroupVersion.WithResource("networkpolicies"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil + + // Group=policy, Version=v1beta1 + case policy_v1beta1.SchemeGroupVersion.WithResource("poddisruptionbudgets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Policy().V1beta1().PodDisruptionBudgets().Informer()}, nil + + // Group=rbac.authorization.k8s.io, Version=v1 + case rbac_v1.SchemeGroupVersion.WithResource("clusterroles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoles().Informer()}, nil + case rbac_v1.SchemeGroupVersion.WithResource("clusterrolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().ClusterRoleBindings().Informer()}, nil + case rbac_v1.SchemeGroupVersion.WithResource("roles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().Roles().Informer()}, nil + case rbac_v1.SchemeGroupVersion.WithResource("rolebindings"): + return 
&genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1().RoleBindings().Informer()}, nil + + // Group=rbac.authorization.k8s.io, Version=v1alpha1 + case rbac_v1alpha1.SchemeGroupVersion.WithResource("clusterroles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoles().Informer()}, nil + case rbac_v1alpha1.SchemeGroupVersion.WithResource("clusterrolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().ClusterRoleBindings().Informer()}, nil + case rbac_v1alpha1.SchemeGroupVersion.WithResource("roles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().Roles().Informer()}, nil + case rbac_v1alpha1.SchemeGroupVersion.WithResource("rolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1alpha1().RoleBindings().Informer()}, nil + + // Group=rbac.authorization.k8s.io, Version=v1beta1 + case rbac_v1beta1.SchemeGroupVersion.WithResource("clusterroles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoles().Informer()}, nil + case rbac_v1beta1.SchemeGroupVersion.WithResource("clusterrolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().ClusterRoleBindings().Informer()}, nil + case rbac_v1beta1.SchemeGroupVersion.WithResource("roles"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().Roles().Informer()}, nil + case rbac_v1beta1.SchemeGroupVersion.WithResource("rolebindings"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Rbac().V1beta1().RoleBindings().Informer()}, nil + + // Group=scheduling.k8s.io, Version=v1alpha1 + case scheduling_v1alpha1.SchemeGroupVersion.WithResource("priorityclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Scheduling().V1alpha1().PriorityClasses().Informer()}, nil + + // Group=settings.k8s.io, Version=v1alpha1 + case settings_v1alpha1.SchemeGroupVersion.WithResource("podpresets"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Settings().V1alpha1().PodPresets().Informer()}, nil + + // Group=storage.k8s.io, Version=v1 + case storage_v1.SchemeGroupVersion.WithResource("storageclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1().StorageClasses().Informer()}, nil + + // Group=storage.k8s.io, Version=v1alpha1 + case storage_v1alpha1.SchemeGroupVersion.WithResource("volumeattachments"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1alpha1().VolumeAttachments().Informer()}, nil + + // Group=storage.k8s.io, Version=v1beta1 + case storage_v1beta1.SchemeGroupVersion.WithResource("storageclasses"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Storage().V1beta1().StorageClasses().Informer()}, nil + + } + + return nil, fmt.Errorf("no informer found for %v", resource) +} diff --git a/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go new file mode 100644 index 00000000..61155f74 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/internalinterfaces/factory_interfaces.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package internalinterfaces + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + kubernetes "k8s.io/client-go/kubernetes" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +type NewInformerFunc func(kubernetes.Interface, time.Duration) cache.SharedIndexInformer + +// SharedInformerFactory a small interface to allow for adding an informer without an import cycle +type SharedInformerFactory interface { + Start(stopCh <-chan struct{}) + InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer +} + +type TweakListOptionsFunc func(*v1.ListOptions) diff --git a/vendor/k8s.io/client-go/informers/networking/interface.go b/vendor/k8s.io/client-go/informers/networking/interface.go new file mode 100644 index 00000000..79e0d0c1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package networking + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/networking/v1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1/interface.go b/vendor/k8s.io/client-go/informers/networking/v1/interface.go new file mode 100644 index 00000000..980a7be9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // NetworkPolicies returns a NetworkPolicyInformer. + NetworkPolicies() NetworkPolicyInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// NetworkPolicies returns a NetworkPolicyInformer. +func (v *version) NetworkPolicies() NetworkPolicyInformer { + return &networkPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go new file mode 100644 index 00000000..b712ba03 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/networking/v1/networkpolicy.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + networking_v1 "k8s.io/api/networking/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/networking/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// NetworkPolicyInformer provides access to a shared informer and lister for +// NetworkPolicies. +type NetworkPolicyInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.NetworkPolicyLister +} + +type networkPolicyInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
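// --- Illustrative usage sketch (not part of the generated networkpolicy.go above) ---
// Every constructor in this package has a "Filtered" variant that accepts a
// TweakListOptionsFunc; the same hook is available factory-wide through
// NewFilteredSharedInformerFactory, shown in factory.go above. The helper below is
// hypothetical: the namespace "kube-system" and the label selector "app=example"
// are made-up values, and the function only demonstrates how the tweak callback
// narrows every List/Watch issued by informers created from this factory.

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
)

// newScopedFactory returns a factory whose informers only see objects in
// kube-system that carry the app=example label. Informers obtained from it,
// e.g. factory.Networking().V1().NetworkPolicies(), inherit the filter.
func newScopedFactory(clientset kubernetes.Interface) informers.SharedInformerFactory {
	return informers.NewFilteredSharedInformerFactory(
		clientset,
		10*time.Minute, // default resync period
		"kube-system",  // hypothetical namespace restriction
		func(opts *metav1.ListOptions) {
			opts.LabelSelector = "app=example" // hypothetical label filter
		},
	)
}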
+func NewNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredNetworkPolicyInformer constructs a new informer for NetworkPolicy type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredNetworkPolicyInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1().NetworkPolicies(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1().NetworkPolicies(namespace).Watch(options) + }, + }, + &networking_v1.NetworkPolicy{}, + resyncPeriod, + indexers, + ) +} + +func (f *networkPolicyInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredNetworkPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *networkPolicyInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networking_v1.NetworkPolicy{}, f.defaultInformer) +} + +func (f *networkPolicyInformer) Lister() v1.NetworkPolicyLister { + return v1.NewNetworkPolicyLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/policy/interface.go b/vendor/k8s.io/client-go/informers/policy/interface.go new file mode 100644 index 00000000..f893c3d5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package policy + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1beta1 "k8s.io/client-go/informers/policy/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go new file mode 100644 index 00000000..f235ee1d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PodDisruptionBudgets returns a PodDisruptionBudgetInformer. + PodDisruptionBudgets() PodDisruptionBudgetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PodDisruptionBudgets returns a PodDisruptionBudgetInformer. +func (v *version) PodDisruptionBudgets() PodDisruptionBudgetInformer { + return &podDisruptionBudgetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 00000000..ba0da35b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + policy_v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/policy/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodDisruptionBudgetInformer provides access to a shared informer and lister for +// PodDisruptionBudgets. +type PodDisruptionBudgetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.PodDisruptionBudgetLister +} + +type podDisruptionBudgetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodDisruptionBudgetInformer constructs a new informer for PodDisruptionBudget type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodDisruptionBudgetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.PolicyV1beta1().PodDisruptionBudgets(namespace).Watch(options) + }, + }, + &policy_v1beta1.PodDisruptionBudget{}, + resyncPeriod, + indexers, + ) +} + +func (f *podDisruptionBudgetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodDisruptionBudgetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podDisruptionBudgetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&policy_v1beta1.PodDisruptionBudget{}, f.defaultInformer) +} + +func (f *podDisruptionBudgetInformer) Lister() v1beta1.PodDisruptionBudgetLister { + return v1beta1.NewPodDisruptionBudgetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/interface.go b/vendor/k8s.io/client-go/informers/rbac/interface.go new file mode 100644 index 00000000..df7adfcd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package rbac + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/rbac/v1" + v1alpha1 "k8s.io/client-go/informers/rbac/v1alpha1" + v1beta1 "k8s.io/client-go/informers/rbac/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go new file mode 100644 index 00000000..ac75abbc --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrole.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. 
+type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoles().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoles().Watch(options) + }, + }, + &rbac_v1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() v1.ClusterRoleLister { + return v1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go new file mode 100644 index 00000000..a3c73e58 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/clusterrolebinding.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoleBindings().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().ClusterRoleBindings().Watch(options) + }, + }, + &rbac_v1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() v1.ClusterRoleBindingLister { + return v1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go new file mode 100644 index 00000000..1e46b039 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. + RoleBindings() RoleBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1/role.go new file mode 100644 index 00000000..fb1de461 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/role.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().Roles(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().Roles(namespace).Watch(options) + }, + }, + &rbac_v1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() v1.RoleLister { + return v1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go new file mode 100644 index 00000000..78c78fa1 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1/rolebinding.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + rbac_v1 "k8s.io/api/rbac/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/rbac/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().RoleBindings(namespace).List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1().RoleBindings(namespace).Watch(options) + }, + }, + &rbac_v1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() v1.RoleBindingLister { + return v1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go new file mode 100644 index 00000000..ec257965 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. +type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoles().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoles().Watch(options) + }, + }, + &rbac_v1alpha1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() v1alpha1.ClusterRoleLister { + return v1alpha1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 00000000..a2d0c396 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoleBindings().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().ClusterRoleBindings().Watch(options) + }, + }, + &rbac_v1alpha1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() v1alpha1.ClusterRoleBindingLister { + return v1alpha1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go new file mode 100644 index 00000000..586283d4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. 
+ RoleBindings() RoleBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go new file mode 100644 index 00000000..4564b336 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/role.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. 
+// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().Roles(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().Roles(namespace).Watch(options) + }, + }, + &rbac_v1alpha1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() v1alpha1.RoleLister { + return v1alpha1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go new file mode 100644 index 00000000..556f966a --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + rbac_v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/rbac/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().RoleBindings(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1alpha1().RoleBindings(namespace).Watch(options) + }, + }, + &rbac_v1alpha1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1alpha1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() v1alpha1.RoleBindingLister { + return v1alpha1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go new file mode 100644 index 00000000..821746b9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrole.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleInformer provides access to a shared informer and lister for +// ClusterRoles. 
+type ClusterRoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ClusterRoleLister +} + +type clusterRoleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleInformer constructs a new informer for ClusterRole type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoles().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoles().Watch(options) + }, + }, + &rbac_v1beta1.ClusterRole{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.ClusterRole{}, f.defaultInformer) +} + +func (f *clusterRoleInformer) Lister() v1beta1.ClusterRoleLister { + return v1beta1.NewClusterRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 00000000..c517ac45 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// ClusterRoleBindingInformer provides access to a shared informer and lister for +// ClusterRoleBindings. +type ClusterRoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.ClusterRoleBindingLister +} + +type clusterRoleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterRoleBindingInformer constructs a new informer for ClusterRoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredClusterRoleBindingInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoleBindings().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().ClusterRoleBindings().Watch(options) + }, + }, + &rbac_v1beta1.ClusterRoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterRoleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterRoleBindingInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.ClusterRoleBinding{}, f.defaultInformer) +} + +func (f *clusterRoleBindingInformer) Lister() v1beta1.ClusterRoleBindingLister { + return v1beta1.NewClusterRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go new file mode 100644 index 00000000..9d375d94 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/interface.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterRoles returns a ClusterRoleInformer. + ClusterRoles() ClusterRoleInformer + // ClusterRoleBindings returns a ClusterRoleBindingInformer. + ClusterRoleBindings() ClusterRoleBindingInformer + // Roles returns a RoleInformer. + Roles() RoleInformer + // RoleBindings returns a RoleBindingInformer. + RoleBindings() RoleBindingInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterRoles returns a ClusterRoleInformer. +func (v *version) ClusterRoles() ClusterRoleInformer { + return &clusterRoleInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// ClusterRoleBindings returns a ClusterRoleBindingInformer. +func (v *version) ClusterRoleBindings() ClusterRoleBindingInformer { + return &clusterRoleBindingInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} + +// Roles returns a RoleInformer. +func (v *version) Roles() RoleInformer { + return &roleInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} + +// RoleBindings returns a RoleBindingInformer. +func (v *version) RoleBindings() RoleBindingInformer { + return &roleBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go new file mode 100644 index 00000000..0f13d3aa --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/role.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleInformer provides access to a shared informer and lister for +// Roles. +type RoleInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RoleLister +} + +type roleInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleInformer constructs a new informer for Role type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredRoleInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().Roles(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().Roles(namespace).Watch(options) + }, + }, + &rbac_v1beta1.Role{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.Role{}, f.defaultInformer) +} + +func (f *roleInformer) Lister() v1beta1.RoleLister { + return v1beta1.NewRoleLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go new file mode 100644 index 00000000..c951d97d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/rbac/v1beta1/rolebinding.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + rbac_v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/rbac/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// RoleBindingInformer provides access to a shared informer and lister for +// RoleBindings. +type RoleBindingInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.RoleBindingLister +} + +type roleBindingInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredRoleBindingInformer constructs a new informer for RoleBinding type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredRoleBindingInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().RoleBindings(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.RbacV1beta1().RoleBindings(namespace).Watch(options) + }, + }, + &rbac_v1beta1.RoleBinding{}, + resyncPeriod, + indexers, + ) +} + +func (f *roleBindingInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredRoleBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *roleBindingInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&rbac_v1beta1.RoleBinding{}, f.defaultInformer) +} + +func (f *roleBindingInformer) Lister() v1beta1.RoleBindingLister { + return v1beta1.NewRoleBindingLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/interface.go b/vendor/k8s.io/client-go/informers/scheduling/interface.go new file mode 100644 index 00000000..60b63e8e --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package scheduling + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/scheduling/v1alpha1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. 
+func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go new file mode 100644 index 00000000..1cceef7b --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PriorityClasses returns a PriorityClassInformer. + PriorityClasses() PriorityClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PriorityClasses returns a PriorityClassInformer. +func (v *version) PriorityClasses() PriorityClassInformer { + return &priorityClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go new file mode 100644 index 00000000..5c90f43d --- /dev/null +++ b/vendor/k8s.io/client-go/informers/scheduling/v1alpha1/priorityclass.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + scheduling_v1alpha1 "k8s.io/api/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/scheduling/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PriorityClassInformer provides access to a shared informer and lister for +// PriorityClasses. 
+type PriorityClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PriorityClassLister +} + +type priorityClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredPriorityClassInformer constructs a new informer for PriorityClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPriorityClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().PriorityClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SchedulingV1alpha1().PriorityClasses().Watch(options) + }, + }, + &scheduling_v1alpha1.PriorityClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *priorityClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPriorityClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *priorityClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&scheduling_v1alpha1.PriorityClass{}, f.defaultInformer) +} + +func (f *priorityClassInformer) Lister() v1alpha1.PriorityClassLister { + return v1alpha1.NewPriorityClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/settings/interface.go b/vendor/k8s.io/client-go/informers/settings/interface.go new file mode 100644 index 00000000..53bc6621 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/settings/interface.go @@ -0,0 +1,46 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package settings + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1alpha1 "k8s.io/client-go/informers/settings/v1alpha1" +) + +// Interface provides access to each of this group's versions. 
+type Interface interface { + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go new file mode 100644 index 00000000..39007ebe --- /dev/null +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // PodPresets returns a PodPresetInformer. + PodPresets() PodPresetInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// PodPresets returns a PodPresetInformer. +func (v *version) PodPresets() PodPresetInformer { + return &podPresetInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go new file mode 100644 index 00000000..2e630c73 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/settings/v1alpha1/podpreset.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + settings_v1alpha1 "k8s.io/api/settings/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/settings/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// PodPresetInformer provides access to a shared informer and lister for +// PodPresets. +type PodPresetInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.PodPresetLister +} + +type podPresetInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewPodPresetInformer constructs a new informer for PodPreset type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredPodPresetInformer constructs a new informer for PodPreset type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredPodPresetInformer(client kubernetes.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SettingsV1alpha1().PodPresets(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.SettingsV1alpha1().PodPresets(namespace).Watch(options) + }, + }, + &settings_v1alpha1.PodPreset{}, + resyncPeriod, + indexers, + ) +} + +func (f *podPresetInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredPodPresetInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *podPresetInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&settings_v1alpha1.PodPreset{}, f.defaultInformer) +} + +func (f *podPresetInformer) Lister() v1alpha1.PodPresetLister { + return v1alpha1.NewPodPresetLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/interface.go b/vendor/k8s.io/client-go/informers/storage/interface.go new file mode 100644 index 00000000..b91613a9 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/interface.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package storage + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + v1 "k8s.io/client-go/informers/storage/v1" + v1alpha1 "k8s.io/client-go/informers/storage/v1alpha1" + v1beta1 "k8s.io/client-go/informers/storage/v1beta1" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface + // V1beta1 provides access to shared informers for resources in V1beta1. + V1beta1() v1beta1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + +// V1beta1 returns a new v1beta1.Interface. +func (g *group) V1beta1() v1beta1.Interface { + return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1/interface.go new file mode 100644 index 00000000..fadb1a07 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // StorageClasses returns a StorageClassInformer. + StorageClasses() StorageClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// StorageClasses returns a StorageClassInformer. +func (v *version) StorageClasses() StorageClassInformer { + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go new file mode 100644 index 00000000..341549f0 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1/storageclass.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1 + +import ( + storage_v1 "k8s.io/api/storage/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1 "k8s.io/client-go/listers/storage/v1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StorageClassInformer provides access to a shared informer and lister for +// StorageClasses. +type StorageClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.StorageClassLister +} + +type storageClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().StorageClasses().List(options) + }, + WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1().StorageClasses().Watch(options) + }, + }, + &storage_v1.StorageClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1.StorageClass{}, f.defaultInformer) +} + +func (f *storageClassInformer) Lister() v1.StorageClassLister { + return v1.NewStorageClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go new file mode 100644 index 00000000..d84eb5fd --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // VolumeAttachments returns a VolumeAttachmentInformer. + VolumeAttachments() VolumeAttachmentInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// VolumeAttachments returns a VolumeAttachmentInformer. 
+func (v *version) VolumeAttachments() VolumeAttachmentInformer { + return &volumeAttachmentInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go new file mode 100644 index 00000000..cab9ffc4 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1alpha1 + +import ( + storage_v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/storage/v1alpha1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// VolumeAttachmentInformer provides access to a shared informer and lister for +// VolumeAttachments. +type VolumeAttachmentInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.VolumeAttachmentLister +} + +type volumeAttachmentInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredVolumeAttachmentInformer constructs a new informer for VolumeAttachment type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredVolumeAttachmentInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttachments().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1alpha1().VolumeAttachments().Watch(options) + }, + }, + &storage_v1alpha1.VolumeAttachment{}, + resyncPeriod, + indexers, + ) +} + +func (f *volumeAttachmentInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredVolumeAttachmentInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *volumeAttachmentInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1alpha1.VolumeAttachment{}, f.defaultInformer) +} + +func (f *volumeAttachmentInformer) Lister() v1alpha1.VolumeAttachmentLister { + return v1alpha1.NewVolumeAttachmentLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go new file mode 100644 index 00000000..7fa1abf5 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // StorageClasses returns a StorageClassInformer. + StorageClasses() StorageClassInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// StorageClasses returns a StorageClassInformer. 
+func (v *version) StorageClasses() StorageClassInformer { + return &storageClassInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go new file mode 100644 index 00000000..3e96b282 --- /dev/null +++ b/vendor/k8s.io/client-go/informers/storage/v1beta1/storageclass.go @@ -0,0 +1,87 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by informer-gen + +package v1beta1 + +import ( + storage_v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1beta1 "k8s.io/client-go/listers/storage/v1beta1" + cache "k8s.io/client-go/tools/cache" + time "time" +) + +// StorageClassInformer provides access to a shared informer and lister for +// StorageClasses. +type StorageClassInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1beta1.StorageClassLister +} + +type storageClassInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredStorageClassInformer constructs a new informer for StorageClass type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredStorageClassInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().StorageClasses().List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.StorageV1beta1().StorageClasses().Watch(options) + }, + }, + &storage_v1beta1.StorageClass{}, + resyncPeriod, + indexers, + ) +} + +func (f *storageClassInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredStorageClassInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *storageClassInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&storage_v1beta1.StorageClass{}, f.defaultInformer) +} + +func (f *storageClassInformer) Lister() v1beta1.StorageClassLister { + return v1beta1.NewStorageClassLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index 310eb61b..7dcf86a7 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,6 +20,7 @@ import ( glog "github.com/golang/glog" discovery "k8s.io/client-go/discovery" admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" @@ -34,6 +35,7 @@ import ( batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" @@ -43,6 +45,7 @@ import ( schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" rest "k8s.io/client-go/rest" flowcontrol "k8s.io/client-go/util/flowcontrol" @@ -51,8 +54,9 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface // Deprecated: please explicitly pick a version if possible. 
- Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface + Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface AppsV1() appsv1.AppsV1Interface @@ -81,6 +85,9 @@ type Interface interface { CoreV1() corev1.CoreV1Interface // Deprecated: please explicitly pick a version if possible. Core() corev1.CoreV1Interface + EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface // Deprecated: please explicitly pick a version if possible. Extensions() extensionsv1beta1.ExtensionsV1beta1Interface @@ -105,6 +112,7 @@ type Interface interface { StorageV1() storagev1.StorageV1Interface // Deprecated: please explicitly pick a version if possible. Storage() storagev1.StorageV1Interface + StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -112,6 +120,7 @@ type Interface interface { type Clientset struct { *discovery.DiscoveryClient admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client appsV1beta1 *appsv1beta1.AppsV1beta1Client appsV1beta2 *appsv1beta2.AppsV1beta2Client appsV1 *appsv1.AppsV1Client @@ -126,6 +135,7 @@ type Clientset struct { batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client networkingV1 *networkingv1.NetworkingV1Client policyV1beta1 *policyv1beta1.PolicyV1beta1Client @@ -136,6 +146,7 @@ type Clientset struct { settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client storageV1beta1 *storagev1beta1.StorageV1beta1Client storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client } // AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client @@ -143,10 +154,15 @@ func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha return c.admissionregistrationV1alpha1 } +// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client +func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 +} + // Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. // Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return c.admissionregistrationV1alpha1 +func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -261,6 +277,17 @@ func (c *Clientset) Core() corev1.CoreV1Interface { return c.coreV1 } +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + +// Deprecated: Events retrieves the default version of EventsClient. +// Please explicitly pick a version. 
+func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 @@ -353,6 +380,11 @@ func (c *Clientset) Storage() storagev1.StorageV1Interface { return c.storageV1 } +// StorageV1alpha1 retrieves the StorageV1alpha1Client +func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { + return c.storageV1alpha1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -373,6 +405,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -429,6 +465,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -469,6 +509,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -483,6 +527,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) cs.appsV1 = appsv1.NewForConfigOrDie(c) @@ -497,6 +542,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) @@ -507,6 +553,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) + cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -516,6 +563,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { func New(c rest.Interface) *Clientset { var cs Clientset cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) cs.appsV1 = appsv1.New(c) @@ -530,6 +578,7 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coreV1 = corev1.New(c) + cs.eventsV1beta1 = 
eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) @@ -540,6 +589,7 @@ func New(c rest.Interface) *Clientset { cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) + cs.storageV1alpha1 = storagev1alpha1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 984b8bc6..7bfd3361 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -18,6 +18,7 @@ package scheme import ( admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" @@ -32,6 +33,7 @@ import ( batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" corev1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -41,6 +43,7 @@ import ( schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" storagev1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" @@ -73,6 +76,7 @@ func init() { // correctly. func AddToScheme(scheme *runtime.Scheme) { admissionregistrationv1alpha1.AddToScheme(scheme) + admissionregistrationv1beta1.AddToScheme(scheme) appsv1beta1.AddToScheme(scheme) appsv1beta2.AddToScheme(scheme) appsv1.AddToScheme(scheme) @@ -87,6 +91,7 @@ func AddToScheme(scheme *runtime.Scheme) { batchv2alpha1.AddToScheme(scheme) certificatesv1beta1.AddToScheme(scheme) corev1.AddToScheme(scheme) + eventsv1beta1.AddToScheme(scheme) extensionsv1beta1.AddToScheme(scheme) networkingv1.AddToScheme(scheme) policyv1beta1.AddToScheme(scheme) @@ -97,5 +102,6 @@ func AddToScheme(scheme *runtime.Scheme) { settingsv1alpha1.AddToScheme(scheme) storagev1beta1.AddToScheme(scheme) storagev1.AddToScheme(scheme) + storagev1alpha1.AddToScheme(scheme) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index 95d53391..5150fee3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -25,7 +25,6 @@ import ( type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface - ExternalAdmissionHookConfigurationsGetter InitializerConfigurationsGetter } @@ -34,10 +33,6 @@ type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *AdmissionregistrationV1alpha1Client) ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface { - return newExternalAdmissionHookConfigurations(c) -} - func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { return newInitializerConfigurations(c) 
} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go deleted file mode 100644 index 1ddc6eb4..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/externaladmissionhookconfiguration.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ExternalAdmissionHookConfigurationsGetter has a method to return a ExternalAdmissionHookConfigurationInterface. -// A group's client should implement this interface. -type ExternalAdmissionHookConfigurationsGetter interface { - ExternalAdmissionHookConfigurations() ExternalAdmissionHookConfigurationInterface -} - -// ExternalAdmissionHookConfigurationInterface has methods to work with ExternalAdmissionHookConfiguration resources. -type ExternalAdmissionHookConfigurationInterface interface { - Create(*v1alpha1.ExternalAdmissionHookConfiguration) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - Update(*v1alpha1.ExternalAdmissionHookConfiguration) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ExternalAdmissionHookConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.ExternalAdmissionHookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) - ExternalAdmissionHookConfigurationExpansion -} - -// externalAdmissionHookConfigurations implements ExternalAdmissionHookConfigurationInterface -type externalAdmissionHookConfigurations struct { - client rest.Interface -} - -// newExternalAdmissionHookConfigurations returns a ExternalAdmissionHookConfigurations -func newExternalAdmissionHookConfigurations(c *AdmissionregistrationV1alpha1Client) *externalAdmissionHookConfigurations { - return &externalAdmissionHookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the externalAdmissionHookConfiguration, and returns the corresponding externalAdmissionHookConfiguration object, and an error if there is any. -func (c *externalAdmissionHookConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ExternalAdmissionHookConfigurations that match those selectors. -func (c *externalAdmissionHookConfigurations) List(opts v1.ListOptions) (result *v1alpha1.ExternalAdmissionHookConfigurationList, err error) { - result = &v1alpha1.ExternalAdmissionHookConfigurationList{} - err = c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested externalAdmissionHookConfigurations. -func (c *externalAdmissionHookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a externalAdmissionHookConfiguration and creates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Create(externalAdmissionHookConfiguration *v1alpha1.ExternalAdmissionHookConfiguration) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Post(). - Resource("externaladmissionhookconfigurations"). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a externalAdmissionHookConfiguration and updates it. Returns the server's representation of the externalAdmissionHookConfiguration, and an error, if there is any. -func (c *externalAdmissionHookConfigurations) Update(externalAdmissionHookConfiguration *v1alpha1.ExternalAdmissionHookConfiguration) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Put(). - Resource("externaladmissionhookconfigurations"). - Name(externalAdmissionHookConfiguration.Name). - Body(externalAdmissionHookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the externalAdmissionHookConfiguration and deletes it. Returns an error if one occurs. -func (c *externalAdmissionHookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *externalAdmissionHookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("externaladmissionhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched externalAdmissionHookConfiguration. -func (c *externalAdmissionHookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ExternalAdmissionHookConfiguration, err error) { - result = &v1alpha1.ExternalAdmissionHookConfiguration{} - err = c.client.Patch(pt). - Resource("externaladmissionhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index eef439ab..ccdfb43f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -16,6 +16,4 @@ limitations under the License. package v1alpha1 -type ExternalAdmissionHookConfigurationExpansion interface{} - type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go new file mode 100644 index 00000000..8d3774b4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1beta1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *AdmissionregistrationV1beta1Client { + return &AdmissionregistrationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go new file mode 100644 index 00000000..1b50aa19 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go new file mode 100644 index 00000000..012a8da7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..36711a50 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. +type MutatingWebhookConfigurationInterface interface { + Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + result = &v1beta1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). 
+ Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..d6684922 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,145 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. +type ValidatingWebhookConfigurationInterface interface { + Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + result = &v1beta1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
+func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go index 20a09cd4..07936304 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -25,7 +25,11 @@ import ( type AppsV1Interface interface { RESTClient() rest.Interface + ControllerRevisionsGetter DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + StatefulSetsGetter } // AppsV1Client is used to interact with features provided by the apps group. @@ -33,10 +37,26 @@ type AppsV1Client struct { restClient rest.Interface } +func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface { return newDaemonSets(c, namespace) } +func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + // NewForConfig creates a new AppsV1Client for the given config. 
func NewForConfig(c *rest.Config) (*AppsV1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go new file mode 100644 index 00000000..1d9f8313 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ControllerRevision, error) + List(opts meta_v1.ListOptions) (*v1.ControllerRevisionList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options meta_v1.GetOptions) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts meta_v1.ListOptions) (result *v1.ControllerRevisionList, err error) { + result = &v1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go new file mode 100644 index 00000000..34c06c8d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1.Deployment) (*v1.Deployment, error) + Update(*v1.Deployment) (*v1.Deployment, error) + UpdateStatus(*v1.Deployment) (*v1.Deployment, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.Deployment, error) + List(opts meta_v1.ListOptions) (*v1.DeploymentList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options meta_v1.GetOptions) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts meta_v1.ListOptions) (result *v1.DeploymentList, err error) { + result = &v1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). 
+ Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go index face4d0b..500d67dd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go @@ -16,4 +16,12 @@ limitations under the License. package v1 +type ControllerRevisionExpansion interface{} + type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go new file mode 100644 index 00000000..5047b0c5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. 
+type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) + UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.ReplicaSet, error) + List(opts meta_v1.ListOptions) (*v1.ReplicaSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options meta_v1.GetOptions) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts meta_v1.ListOptions) (result *v1.ReplicaSetList, err error) { + result = &v1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go new file mode 100644 index 00000000..2c927ac0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -0,0 +1,172 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. 
+type StatefulSetInterface interface { + Create(*v1.StatefulSet) (*v1.StatefulSet, error) + Update(*v1.StatefulSet) (*v1.StatefulSet, error) + UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) + Delete(name string, options *meta_v1.DeleteOptions) error + DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error + Get(name string, options meta_v1.GetOptions) (*v1.StatefulSet, error) + List(opts meta_v1.ListOptions) (*v1.StatefulSetList, error) + Watch(opts meta_v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options meta_v1.GetOptions) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts meta_v1.ListOptions) (result *v1.StatefulSetList, err error) { + result = &v1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. 
+func (c *statefulSets) Delete(name string, options *meta_v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go new file mode 100644 index 00000000..1b50aa19 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go new file mode 100644 index 00000000..3473e99c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -0,0 +1,155 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. 
+type EventInterface interface { + Create(*v1beta1.Event) (*v1beta1.Event, error) + Update(*v1beta1.Event) (*v1beta1.Event, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Event, error) + List(opts v1.ListOptions) (*v1beta1.EventList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *EventsV1beta1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + result = &v1beta1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). 
+ Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go new file mode 100644 index 00000000..05cee7fb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type EventsV1beta1Interface interface { + RESTClient() rest.Interface + EventsGetter +} + +// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group. +type EventsV1beta1Client struct { + restClient rest.Interface +} + +func (c *EventsV1beta1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +// NewForConfig creates a new EventsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &EventsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new EventsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *EventsV1beta1Client { + return &EventsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *EventsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go new file mode 100644 index 00000000..82b2fb4a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index c1798f67..b4f8886a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -31,7 +31,6 @@ type ExtensionsV1beta1Interface interface { PodSecurityPoliciesGetter ReplicaSetsGetter ScalesGetter - ThirdPartyResourcesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. @@ -63,10 +62,6 @@ func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { return newScales(c, namespace) } -func (c *ExtensionsV1beta1Client) ThirdPartyResources() ThirdPartyResourceInterface { - return newThirdPartyResources(c) -} - // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go index d0a3d64b..e693fe68 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -23,5 +23,3 @@ type IngressExpansion interface{} type PodSecurityPolicyExpansion interface{} type ReplicaSetExpansion interface{} - -type ThirdPartyResourceExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go deleted file mode 100644 index 28fb6dcb..00000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/thirdpartyresource.go +++ /dev/null @@ -1,145 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ThirdPartyResourcesGetter has a method to return a ThirdPartyResourceInterface. -// A group's client should implement this interface. 
-type ThirdPartyResourcesGetter interface { - ThirdPartyResources() ThirdPartyResourceInterface -} - -// ThirdPartyResourceInterface has methods to work with ThirdPartyResource resources. -type ThirdPartyResourceInterface interface { - Create(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Update(*v1beta1.ThirdPartyResource) (*v1beta1.ThirdPartyResource, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ThirdPartyResource, error) - List(opts v1.ListOptions) (*v1beta1.ThirdPartyResourceList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) - ThirdPartyResourceExpansion -} - -// thirdPartyResources implements ThirdPartyResourceInterface -type thirdPartyResources struct { - client rest.Interface -} - -// newThirdPartyResources returns a ThirdPartyResources -func newThirdPartyResources(c *ExtensionsV1beta1Client) *thirdPartyResources { - return &thirdPartyResources{ - client: c.RESTClient(), - } -} - -// Get takes name of the thirdPartyResource, and returns the corresponding thirdPartyResource object, and an error if there is any. -func (c *thirdPartyResources) Get(name string, options v1.GetOptions) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Get(). - Resource("thirdpartyresources"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ThirdPartyResources that match those selectors. -func (c *thirdPartyResources) List(opts v1.ListOptions) (result *v1beta1.ThirdPartyResourceList, err error) { - result = &v1beta1.ThirdPartyResourceList{} - err = c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested thirdPartyResources. -func (c *thirdPartyResources) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("thirdpartyresources"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a thirdPartyResource and creates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Create(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Post(). - Resource("thirdpartyresources"). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Update takes the representation of a thirdPartyResource and updates it. Returns the server's representation of the thirdPartyResource, and an error, if there is any. -func (c *thirdPartyResources) Update(thirdPartyResource *v1beta1.ThirdPartyResource) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Put(). - Resource("thirdpartyresources"). - Name(thirdPartyResource.Name). - Body(thirdPartyResource). - Do(). - Into(result) - return -} - -// Delete takes name of the thirdPartyResource and deletes it. Returns an error if one occurs. 
-func (c *thirdPartyResources) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *thirdPartyResources) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("thirdpartyresources"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched thirdPartyResource. -func (c *thirdPartyResources) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ThirdPartyResource, err error) { - result = &v1beta1.ThirdPartyResource{} - err = c.client.Patch(pt). - Resource("thirdpartyresources"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go new file mode 100644 index 00000000..cdaaf620 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go new file mode 100644 index 00000000..afa636a2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go new file mode 100644 index 00000000..3e8c70bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -0,0 +1,88 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeAttachmentsGetter +} + +// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1alpha1Client { + return &StorageV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go new file mode 100644 index 00000000..08bdfb25 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + result = &v1alpha1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. 
+func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..fb3b0098 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// InitializerConfigurationListerExpansion allows custom methods to be added to +// InitializerConfigurationLister. +type InitializerConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 00000000..60b004ef --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// InitializerConfigurationLister helps list InitializerConfigurations. +type InitializerConfigurationLister interface { + // List lists all InitializerConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) + // Get retrieves the InitializerConfiguration from the index for a given name. + Get(name string) (*v1alpha1.InitializerConfiguration, error) + InitializerConfigurationListerExpansion +} + +// initializerConfigurationLister implements the InitializerConfigurationLister interface. +type initializerConfigurationLister struct { + indexer cache.Indexer +} + +// NewInitializerConfigurationLister returns a new InitializerConfigurationLister. +func NewInitializerConfigurationLister(indexer cache.Indexer) InitializerConfigurationLister { + return &initializerConfigurationLister{indexer: indexer} +} + +// List lists all InitializerConfigurations in the indexer. +func (s *initializerConfigurationLister) List(selector labels.Selector) (ret []*v1alpha1.InitializerConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.InitializerConfiguration)) + }) + return ret, err +} + +// Get retrieves the InitializerConfiguration from the index for a given name. +func (s *initializerConfigurationLister) Get(name string) (*v1alpha1.InitializerConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("initializerconfiguration"), name) + } + return obj.(*v1alpha1.InitializerConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go new file mode 100644 index 00000000..c9bf0fa5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// MutatingWebhookConfigurationListerExpansion allows custom methods to be added to +// MutatingWebhookConfigurationLister. +type MutatingWebhookConfigurationListerExpansion interface{} + +// ValidatingWebhookConfigurationListerExpansion allows custom methods to be added to +// ValidatingWebhookConfigurationLister. +type ValidatingWebhookConfigurationListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 00000000..753dd185 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// MutatingWebhookConfigurationLister helps list MutatingWebhookConfigurations. +type MutatingWebhookConfigurationLister interface { + // List lists all MutatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) + // Get retrieves the MutatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) + MutatingWebhookConfigurationListerExpansion +} + +// mutatingWebhookConfigurationLister implements the MutatingWebhookConfigurationLister interface. +type mutatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewMutatingWebhookConfigurationLister returns a new MutatingWebhookConfigurationLister. +func NewMutatingWebhookConfigurationLister(indexer cache.Indexer) MutatingWebhookConfigurationLister { + return &mutatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all MutatingWebhookConfigurations in the indexer. +func (s *mutatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.MutatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.MutatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the MutatingWebhookConfiguration from the index for a given name. 
+func (s *mutatingWebhookConfigurationLister) Get(name string) (*v1beta1.MutatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("mutatingwebhookconfiguration"), name) + } + return obj.(*v1beta1.MutatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 00000000..6cb6067a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ValidatingWebhookConfigurationLister helps list ValidatingWebhookConfigurations. +type ValidatingWebhookConfigurationLister interface { + // List lists all ValidatingWebhookConfigurations in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) + // Get retrieves the ValidatingWebhookConfiguration from the index for a given name. + Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) + ValidatingWebhookConfigurationListerExpansion +} + +// validatingWebhookConfigurationLister implements the ValidatingWebhookConfigurationLister interface. +type validatingWebhookConfigurationLister struct { + indexer cache.Indexer +} + +// NewValidatingWebhookConfigurationLister returns a new ValidatingWebhookConfigurationLister. +func NewValidatingWebhookConfigurationLister(indexer cache.Indexer) ValidatingWebhookConfigurationLister { + return &validatingWebhookConfigurationLister{indexer: indexer} +} + +// List lists all ValidatingWebhookConfigurations in the indexer. +func (s *validatingWebhookConfigurationLister) List(selector labels.Selector) (ret []*v1beta1.ValidatingWebhookConfiguration, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ValidatingWebhookConfiguration)) + }) + return ret, err +} + +// Get retrieves the ValidatingWebhookConfiguration from the index for a given name. 
+func (s *validatingWebhookConfigurationLister) Get(name string) (*v1beta1.ValidatingWebhookConfiguration, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("validatingwebhookconfiguration"), name) + } + return obj.(*v1beta1.ValidatingWebhookConfiguration), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go new file mode 100644 index 00000000..c05d14c2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. 
+type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. +func (s controllerRevisionNamespaceLister) Get(name string) (*v1.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("controllerrevision"), name) + } + return obj.(*v1.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go new file mode 100644 index 00000000..307f8bc7 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. +type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. 
+ Get(name string) (*v1.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. +func (s daemonSetNamespaceLister) Get(name string) (*v1.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("daemonset"), name) + } + return obj.(*v1.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go new file mode 100644 index 00000000..83435561 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/daemonset_expansion.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. +type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) { + var selector labels.Selector + var daemonSet *apps.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go new file mode 100644 index 00000000..36af9009 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. 
+func (s *deploymentLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("deployment"), name) + } + return obj.(*v1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go new file mode 100644 index 00000000..7802eca5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. 
+func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*apps.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go new file mode 100644 index 00000000..48917c2c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go new file mode 100644 index 00000000..7e316d6b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. 
+type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. +func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicaset"), name) + } + return obj.(*v1.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go new file mode 100644 index 00000000..675e615a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go new file mode 100644 index 00000000..fe584038 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. 
+func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. +func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("statefulset"), name) + } + return obj.(*v1.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go new file mode 100644 index 00000000..b4912976 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. 
+func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go new file mode 100644 index 00000000..f3c85bfa --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. 
+func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1beta1.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. +type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. +func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta1.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("controllerrevision"), name) + } + return obj.(*v1beta1.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go new file mode 100644 index 00000000..f59f3a96 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. 
+func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) + } + return obj.(*v1beta1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go new file mode 100644 index 00000000..441ceecd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface{} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. 
+type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go new file mode 100644 index 00000000..ec9a419a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/scale.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. 
+func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) + } + return obj.(*v1beta1.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go new file mode 100644 index 00000000..f10ef731 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. +func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1beta1.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. 
+func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1beta1.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("statefulset"), name) + } + return obj.(*v1beta1.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go new file mode 100644 index 00000000..0741792a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta1/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. +func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go new file mode 100644 index 00000000..f9f1ef06 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/controllerrevision.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ControllerRevisionLister helps list ControllerRevisions. +type ControllerRevisionLister interface { + // List lists all ControllerRevisions in the indexer. + List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + // ControllerRevisions returns an object that can list and get ControllerRevisions. + ControllerRevisions(namespace string) ControllerRevisionNamespaceLister + ControllerRevisionListerExpansion +} + +// controllerRevisionLister implements the ControllerRevisionLister interface. +type controllerRevisionLister struct { + indexer cache.Indexer +} + +// NewControllerRevisionLister returns a new ControllerRevisionLister. +func NewControllerRevisionLister(indexer cache.Indexer) ControllerRevisionLister { + return &controllerRevisionLister{indexer: indexer} +} + +// List lists all ControllerRevisions in the indexer. +func (s *controllerRevisionLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ControllerRevision)) + }) + return ret, err +} + +// ControllerRevisions returns an object that can list and get ControllerRevisions. +func (s *controllerRevisionLister) ControllerRevisions(namespace string) ControllerRevisionNamespaceLister { + return controllerRevisionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ControllerRevisionNamespaceLister helps list and get ControllerRevisions. +type ControllerRevisionNamespaceLister interface { + // List lists all ControllerRevisions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) + // Get retrieves the ControllerRevision from the indexer for a given namespace and name. + Get(name string) (*v1beta2.ControllerRevision, error) + ControllerRevisionNamespaceListerExpansion +} + +// controllerRevisionNamespaceLister implements the ControllerRevisionNamespaceLister +// interface. 
+type controllerRevisionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ControllerRevisions in the indexer for a given namespace. +func (s controllerRevisionNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ControllerRevision, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ControllerRevision)) + }) + return ret, err +} + +// Get retrieves the ControllerRevision from the indexer for a given namespace and name. +func (s controllerRevisionNamespaceLister) Get(name string) (*v1beta2.ControllerRevision, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("controllerrevision"), name) + } + return obj.(*v1beta2.ControllerRevision), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go new file mode 100644 index 00000000..cbdb13ef --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. +type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. 
+ Get(name string) (*v1beta2.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. +func (s daemonSetNamespaceLister) Get(name string) (*v1beta2.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("daemonset"), name) + } + return obj.(*v1beta2.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go new file mode 100644 index 00000000..3b01aaa4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/daemonset_expansion.go @@ -0,0 +1,113 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. +type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*apps.DaemonSet, error) { + var selector labels.Selector + var daemonSet *apps.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*apps.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*apps.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go new file mode 100644 index 00000000..0778a9fd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. 
+func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1beta2.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1beta2.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("deployment"), name) + } + return obj.(*v1beta2.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go new file mode 100644 index 00000000..1537167a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. 
+func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *apps.ReplicaSet) ([]*apps.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*apps.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go new file mode 100644 index 00000000..6db63d4b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/expansion_generated.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +// ControllerRevisionListerExpansion allows custom methods to be added to +// ControllerRevisionLister. +type ControllerRevisionListerExpansion interface{} + +// ControllerRevisionNamespaceListerExpansion allows custom methods to be added to +// ControllerRevisionNamespaceLister. +type ControllerRevisionNamespaceListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go new file mode 100644 index 00000000..f76e2eeb --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. +type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1beta2.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. +func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1beta2.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("replicaset"), name) + } + return obj.(*v1beta2.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go new file mode 100644 index 00000000..7562fe99 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*apps.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*apps.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go new file mode 100644 index 00000000..11cb3e19 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/scale.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta2.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. 
+func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta2.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. +func (s scaleNamespaceLister) Get(name string) (*v1beta2.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("scale"), name) + } + return obj.(*v1beta2.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go new file mode 100644 index 00000000..13ef28f8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StatefulSetLister helps list StatefulSets. +type StatefulSetLister interface { + // List lists all StatefulSets in the indexer. + List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + // StatefulSets returns an object that can list and get StatefulSets. + StatefulSets(namespace string) StatefulSetNamespaceLister + StatefulSetListerExpansion +} + +// statefulSetLister implements the StatefulSetLister interface. +type statefulSetLister struct { + indexer cache.Indexer +} + +// NewStatefulSetLister returns a new StatefulSetLister. +func NewStatefulSetLister(indexer cache.Indexer) StatefulSetLister { + return &statefulSetLister{indexer: indexer} +} + +// List lists all StatefulSets in the indexer. 
+func (s *statefulSetLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.StatefulSet)) + }) + return ret, err +} + +// StatefulSets returns an object that can list and get StatefulSets. +func (s *statefulSetLister) StatefulSets(namespace string) StatefulSetNamespaceLister { + return statefulSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// StatefulSetNamespaceLister helps list and get StatefulSets. +type StatefulSetNamespaceLister interface { + // List lists all StatefulSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) + // Get retrieves the StatefulSet from the indexer for a given namespace and name. + Get(name string) (*v1beta2.StatefulSet, error) + StatefulSetNamespaceListerExpansion +} + +// statefulSetNamespaceLister implements the StatefulSetNamespaceLister +// interface. +type statefulSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all StatefulSets in the indexer for a given namespace. +func (s statefulSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.StatefulSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta2.StatefulSet)) + }) + return ret, err +} + +// Get retrieves the StatefulSet from the indexer for a given namespace and name. +func (s statefulSetNamespaceLister) Get(name string) (*v1beta2.StatefulSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta2.Resource("statefulset"), name) + } + return obj.(*v1beta2.StatefulSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go new file mode 100644 index 00000000..6fa6b914 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/apps/v1beta2/statefulset_expansion.go @@ -0,0 +1,77 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta2" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// StatefulSetListerExpansion allows custom methods to be added to +// StatefulSetLister. +type StatefulSetListerExpansion interface { + GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) +} + +// StatefulSetNamespaceListerExpansion allows custom methods to be added to +// StatefulSetNamespaceLister. +type StatefulSetNamespaceListerExpansion interface{} + +// GetPodStatefulSets returns a list of StatefulSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching StatefulSets are found. 
+func (s *statefulSetLister) GetPodStatefulSets(pod *v1.Pod) ([]*apps.StatefulSet, error) { + var selector labels.Selector + var ps *apps.StatefulSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no StatefulSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.StatefulSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var psList []*apps.StatefulSet + for i := range list { + ps = list[i] + if ps.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(ps.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a StatefulSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + psList = append(psList, ps) + } + + if len(psList) == 0 { + return nil, fmt.Errorf("could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return psList, nil +} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go new file mode 100644 index 00000000..f7b00603 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerLister. +type HorizontalPodAutoscalerListerExpansion interface{} + +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerNamespaceLister. +type HorizontalPodAutoscalerNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 00000000..48012203 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/autoscaling/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. +type HorizontalPodAutoscalerLister interface { + // List lists all HorizontalPodAutoscalers in the indexer. + List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister + HorizontalPodAutoscalerListerExpansion +} + +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. +type horizontalPodAutoscalerLister struct { + indexer cache.Indexer +} + +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { + return &horizontalPodAutoscalerLister{indexer: indexer} +} + +// List lists all HorizontalPodAutoscalers in the indexer. +func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { + return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. +type HorizontalPodAutoscalerNamespaceLister interface { + // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) + // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. + Get(name string) (*v1.HorizontalPodAutoscaler, error) + HorizontalPodAutoscalerNamespaceListerExpansion +} + +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister +// interface. +type horizontalPodAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. +func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v1.HorizontalPodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. 
+func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v1.HorizontalPodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("horizontalpodautoscaler"), name) + } + return obj.(*v1.HorizontalPodAutoscaler), nil +} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go new file mode 100644 index 00000000..9e84ef13 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2beta1 + +// HorizontalPodAutoscalerListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerLister. +type HorizontalPodAutoscalerListerExpansion interface{} + +// HorizontalPodAutoscalerNamespaceListerExpansion allows custom methods to be added to +// HorizontalPodAutoscalerNamespaceLister. +type HorizontalPodAutoscalerNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go new file mode 100644 index 00000000..c8fbdecd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2beta1 + +import ( + v2beta1 "k8s.io/api/autoscaling/v2beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// HorizontalPodAutoscalerLister helps list HorizontalPodAutoscalers. +type HorizontalPodAutoscalerLister interface { + // List lists all HorizontalPodAutoscalers in the indexer. + List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + // HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister + HorizontalPodAutoscalerListerExpansion +} + +// horizontalPodAutoscalerLister implements the HorizontalPodAutoscalerLister interface. 
+type horizontalPodAutoscalerLister struct { + indexer cache.Indexer +} + +// NewHorizontalPodAutoscalerLister returns a new HorizontalPodAutoscalerLister. +func NewHorizontalPodAutoscalerLister(indexer cache.Indexer) HorizontalPodAutoscalerLister { + return &horizontalPodAutoscalerLister{indexer: indexer} +} + +// List lists all HorizontalPodAutoscalers in the indexer. +func (s *horizontalPodAutoscalerLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// HorizontalPodAutoscalers returns an object that can list and get HorizontalPodAutoscalers. +func (s *horizontalPodAutoscalerLister) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerNamespaceLister { + return horizontalPodAutoscalerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// HorizontalPodAutoscalerNamespaceLister helps list and get HorizontalPodAutoscalers. +type HorizontalPodAutoscalerNamespaceLister interface { + // List lists all HorizontalPodAutoscalers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) + // Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. + Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) + HorizontalPodAutoscalerNamespaceListerExpansion +} + +// horizontalPodAutoscalerNamespaceLister implements the HorizontalPodAutoscalerNamespaceLister +// interface. +type horizontalPodAutoscalerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all HorizontalPodAutoscalers in the indexer for a given namespace. +func (s horizontalPodAutoscalerNamespaceLister) List(selector labels.Selector) (ret []*v2beta1.HorizontalPodAutoscaler, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v2beta1.HorizontalPodAutoscaler)) + }) + return ret, err +} + +// Get retrieves the HorizontalPodAutoscaler from the indexer for a given namespace and name. +func (s horizontalPodAutoscalerNamespaceLister) Get(name string) (*v2beta1.HorizontalPodAutoscaler, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2beta1.Resource("horizontalpodautoscaler"), name) + } + return obj.(*v2beta1.HorizontalPodAutoscaler), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go new file mode 100644 index 00000000..38b7e272 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1/expansion_generated.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job.go b/vendor/k8s.io/client-go/listers/batch/v1/job.go new file mode 100644 index 00000000..89280d9f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1/job.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// JobLister helps list Jobs. +type JobLister interface { + // List lists all Jobs in the indexer. + List(selector labels.Selector) (ret []*v1.Job, err error) + // Jobs returns an object that can list and get Jobs. + Jobs(namespace string) JobNamespaceLister + JobListerExpansion +} + +// jobLister implements the JobLister interface. +type jobLister struct { + indexer cache.Indexer +} + +// NewJobLister returns a new JobLister. +func NewJobLister(indexer cache.Indexer) JobLister { + return &jobLister{indexer: indexer} +} + +// List lists all Jobs in the indexer. +func (s *jobLister) List(selector labels.Selector) (ret []*v1.Job, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Job)) + }) + return ret, err +} + +// Jobs returns an object that can list and get Jobs. +func (s *jobLister) Jobs(namespace string) JobNamespaceLister { + return jobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// JobNamespaceLister helps list and get Jobs. +type JobNamespaceLister interface { + // List lists all Jobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Job, err error) + // Get retrieves the Job from the indexer for a given namespace and name. + Get(name string) (*v1.Job, error) + JobNamespaceListerExpansion +} + +// jobNamespaceLister implements the JobNamespaceLister +// interface. +type jobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Jobs in the indexer for a given namespace. +func (s jobNamespaceLister) List(selector labels.Selector) (ret []*v1.Job, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Job)) + }) + return ret, err +} + +// Get retrieves the Job from the indexer for a given namespace and name. 
+func (s jobNamespaceLister) Get(name string) (*v1.Job, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("job"), name) + } + return obj.(*v1.Job), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go b/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go new file mode 100644 index 00000000..fdcd5f32 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1/job_expansion.go @@ -0,0 +1,68 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + batch "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// JobListerExpansion allows custom methods to be added to +// JobLister. +type JobListerExpansion interface { + // GetPodJobs returns a list of Jobs that potentially + // match a Pod. Only the one specified in the Pod's ControllerRef + // will actually manage it. + // Returns an error only if no matching Jobs are found. + GetPodJobs(pod *v1.Pod) (jobs []batch.Job, err error) +} + +// GetPodJobs returns a list of Jobs that potentially +// match a Pod. Only the one specified in the Pod's ControllerRef +// will actually manage it. +// Returns an error only if no matching Jobs are found. +func (l *jobLister) GetPodJobs(pod *v1.Pod) (jobs []batch.Job, err error) { + if len(pod.Labels) == 0 { + err = fmt.Errorf("no jobs found for pod %v because it has no labels", pod.Name) + return + } + + var list []*batch.Job + list, err = l.Jobs(pod.Namespace).List(labels.Everything()) + if err != nil { + return + } + for _, job := range list { + selector, _ := metav1.LabelSelectorAsSelector(job.Spec.Selector) + if !selector.Matches(labels.Set(pod.Labels)) { + continue + } + jobs = append(jobs, *job) + } + if len(jobs) == 0 { + err = fmt.Errorf("could not find jobs for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + return +} + +// JobNamespaceListerExpansion allows custom methods to be added to +// JobNamespaceLister. +type JobNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go new file mode 100644 index 00000000..a8fa51ec --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/cronjob.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/batch/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CronJobLister helps list CronJobs. +type CronJobLister interface { + // List lists all CronJobs in the indexer. + List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + // CronJobs returns an object that can list and get CronJobs. + CronJobs(namespace string) CronJobNamespaceLister + CronJobListerExpansion +} + +// cronJobLister implements the CronJobLister interface. +type cronJobLister struct { + indexer cache.Indexer +} + +// NewCronJobLister returns a new CronJobLister. +func NewCronJobLister(indexer cache.Indexer) CronJobLister { + return &cronJobLister{indexer: indexer} +} + +// List lists all CronJobs in the indexer. +func (s *cronJobLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CronJob)) + }) + return ret, err +} + +// CronJobs returns an object that can list and get CronJobs. +func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { + return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CronJobNamespaceLister helps list and get CronJobs. +type CronJobNamespaceLister interface { + // List lists all CronJobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) + // Get retrieves the CronJob from the indexer for a given namespace and name. + Get(name string) (*v1beta1.CronJob, error) + CronJobNamespaceListerExpansion +} + +// cronJobNamespaceLister implements the CronJobNamespaceLister +// interface. +type cronJobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CronJobs in the indexer for a given namespace. +func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.CronJob, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CronJob)) + }) + return ret, err +} + +// Get retrieves the CronJob from the indexer for a given namespace and name. +func (s cronJobNamespaceLister) Get(name string) (*v1beta1.CronJob, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("cronjob"), name) + } + return obj.(*v1beta1.CronJob), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go new file mode 100644 index 00000000..3d84d249 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// CronJobListerExpansion allows custom methods to be added to +// CronJobLister. +type CronJobListerExpansion interface{} + +// CronJobNamespaceListerExpansion allows custom methods to be added to +// CronJobNamespaceLister. +type CronJobNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go new file mode 100644 index 00000000..51f5eef5 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v2alpha1/cronjob.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2alpha1 + +import ( + v2alpha1 "k8s.io/api/batch/v2alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CronJobLister helps list CronJobs. +type CronJobLister interface { + // List lists all CronJobs in the indexer. + List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) + // CronJobs returns an object that can list and get CronJobs. + CronJobs(namespace string) CronJobNamespaceLister + CronJobListerExpansion +} + +// cronJobLister implements the CronJobLister interface. +type cronJobLister struct { + indexer cache.Indexer +} + +// NewCronJobLister returns a new CronJobLister. +func NewCronJobLister(indexer cache.Indexer) CronJobLister { + return &cronJobLister{indexer: indexer} +} + +// List lists all CronJobs in the indexer. +func (s *cronJobLister) List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CronJob)) + }) + return ret, err +} + +// CronJobs returns an object that can list and get CronJobs. +func (s *cronJobLister) CronJobs(namespace string) CronJobNamespaceLister { + return cronJobNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// CronJobNamespaceLister helps list and get CronJobs. +type CronJobNamespaceLister interface { + // List lists all CronJobs in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) + // Get retrieves the CronJob from the indexer for a given namespace and name. + Get(name string) (*v2alpha1.CronJob, error) + CronJobNamespaceListerExpansion +} + +// cronJobNamespaceLister implements the CronJobNamespaceLister +// interface. +type cronJobNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all CronJobs in the indexer for a given namespace. 
+func (s cronJobNamespaceLister) List(selector labels.Selector) (ret []*v2alpha1.CronJob, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v2alpha1.CronJob)) + }) + return ret, err +} + +// Get retrieves the CronJob from the indexer for a given namespace and name. +func (s cronJobNamespaceLister) Get(name string) (*v2alpha1.CronJob, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v2alpha1.Resource("cronjob"), name) + } + return obj.(*v2alpha1.CronJob), nil +} diff --git a/vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go new file mode 100644 index 00000000..38ac70cd --- /dev/null +++ b/vendor/k8s.io/client-go/listers/batch/v2alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v2alpha1 + +// CronJobListerExpansion allows custom methods to be added to +// CronJobLister. +type CronJobListerExpansion interface{} + +// CronJobNamespaceListerExpansion allows custom methods to be added to +// CronJobNamespaceLister. +type CronJobNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 00000000..425dc6b4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/certificates/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// CertificateSigningRequestLister helps list CertificateSigningRequests. +type CertificateSigningRequestLister interface { + // List lists all CertificateSigningRequests in the indexer. + List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) + // Get retrieves the CertificateSigningRequest from the index for a given name. 
+ Get(name string) (*v1beta1.CertificateSigningRequest, error) + CertificateSigningRequestListerExpansion +} + +// certificateSigningRequestLister implements the CertificateSigningRequestLister interface. +type certificateSigningRequestLister struct { + indexer cache.Indexer +} + +// NewCertificateSigningRequestLister returns a new CertificateSigningRequestLister. +func NewCertificateSigningRequestLister(indexer cache.Indexer) CertificateSigningRequestLister { + return &certificateSigningRequestLister{indexer: indexer} +} + +// List lists all CertificateSigningRequests in the indexer. +func (s *certificateSigningRequestLister) List(selector labels.Selector) (ret []*v1beta1.CertificateSigningRequest, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.CertificateSigningRequest)) + }) + return ret, err +} + +// Get retrieves the CertificateSigningRequest from the index for a given name. +func (s *certificateSigningRequestLister) Get(name string) (*v1beta1.CertificateSigningRequest, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("certificatesigningrequest"), name) + } + return obj.(*v1beta1.CertificateSigningRequest), nil +} diff --git a/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go new file mode 100644 index 00000000..c240be44 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/certificates/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// CertificateSigningRequestListerExpansion allows custom methods to be added to +// CertificateSigningRequestLister. +type CertificateSigningRequestListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go new file mode 100644 index 00000000..6ba67d0b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/componentstatus.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ComponentStatusLister helps list ComponentStatuses. +type ComponentStatusLister interface { + // List lists all ComponentStatuses in the indexer. + List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) + // Get retrieves the ComponentStatus from the index for a given name. + Get(name string) (*v1.ComponentStatus, error) + ComponentStatusListerExpansion +} + +// componentStatusLister implements the ComponentStatusLister interface. +type componentStatusLister struct { + indexer cache.Indexer +} + +// NewComponentStatusLister returns a new ComponentStatusLister. +func NewComponentStatusLister(indexer cache.Indexer) ComponentStatusLister { + return &componentStatusLister{indexer: indexer} +} + +// List lists all ComponentStatuses in the indexer. +func (s *componentStatusLister) List(selector labels.Selector) (ret []*v1.ComponentStatus, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ComponentStatus)) + }) + return ret, err +} + +// Get retrieves the ComponentStatus from the index for a given name. +func (s *componentStatusLister) Get(name string) (*v1.ComponentStatus, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("componentstatus"), name) + } + return obj.(*v1.ComponentStatus), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/configmap.go b/vendor/k8s.io/client-go/listers/core/v1/configmap.go new file mode 100644 index 00000000..e976928d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/configmap.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ConfigMapLister helps list ConfigMaps. +type ConfigMapLister interface { + // List lists all ConfigMaps in the indexer. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // ConfigMaps returns an object that can list and get ConfigMaps. + ConfigMaps(namespace string) ConfigMapNamespaceLister + ConfigMapListerExpansion +} + +// configMapLister implements the ConfigMapLister interface. +type configMapLister struct { + indexer cache.Indexer +} + +// NewConfigMapLister returns a new ConfigMapLister. +func NewConfigMapLister(indexer cache.Indexer) ConfigMapLister { + return &configMapLister{indexer: indexer} +} + +// List lists all ConfigMaps in the indexer. 
+func (s *configMapLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// ConfigMaps returns an object that can list and get ConfigMaps. +func (s *configMapLister) ConfigMaps(namespace string) ConfigMapNamespaceLister { + return configMapNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ConfigMapNamespaceLister helps list and get ConfigMaps. +type ConfigMapNamespaceLister interface { + // List lists all ConfigMaps in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ConfigMap, err error) + // Get retrieves the ConfigMap from the indexer for a given namespace and name. + Get(name string) (*v1.ConfigMap, error) + ConfigMapNamespaceListerExpansion +} + +// configMapNamespaceLister implements the ConfigMapNamespaceLister +// interface. +type configMapNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ConfigMaps in the indexer for a given namespace. +func (s configMapNamespaceLister) List(selector labels.Selector) (ret []*v1.ConfigMap, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ConfigMap)) + }) + return ret, err +} + +// Get retrieves the ConfigMap from the indexer for a given namespace and name. +func (s configMapNamespaceLister) Get(name string) (*v1.ConfigMap, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("configmap"), name) + } + return obj.(*v1.ConfigMap), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/endpoints.go b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go new file mode 100644 index 00000000..6f5a1133 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/endpoints.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EndpointsLister helps list Endpoints. +type EndpointsLister interface { + // List lists all Endpoints in the indexer. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Endpoints returns an object that can list and get Endpoints. + Endpoints(namespace string) EndpointsNamespaceLister + EndpointsListerExpansion +} + +// endpointsLister implements the EndpointsLister interface. +type endpointsLister struct { + indexer cache.Indexer +} + +// NewEndpointsLister returns a new EndpointsLister. +func NewEndpointsLister(indexer cache.Indexer) EndpointsLister { + return &endpointsLister{indexer: indexer} +} + +// List lists all Endpoints in the indexer. 
+func (s *endpointsLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Endpoints returns an object that can list and get Endpoints. +func (s *endpointsLister) Endpoints(namespace string) EndpointsNamespaceLister { + return endpointsNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EndpointsNamespaceLister helps list and get Endpoints. +type EndpointsNamespaceLister interface { + // List lists all Endpoints in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Endpoints, err error) + // Get retrieves the Endpoints from the indexer for a given namespace and name. + Get(name string) (*v1.Endpoints, error) + EndpointsNamespaceListerExpansion +} + +// endpointsNamespaceLister implements the EndpointsNamespaceLister +// interface. +type endpointsNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Endpoints in the indexer for a given namespace. +func (s endpointsNamespaceLister) List(selector labels.Selector) (ret []*v1.Endpoints, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Endpoints)) + }) + return ret, err +} + +// Get retrieves the Endpoints from the indexer for a given namespace and name. +func (s endpointsNamespaceLister) Get(name string) (*v1.Endpoints, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("endpoints"), name) + } + return obj.(*v1.Endpoints), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/event.go b/vendor/k8s.io/client-go/listers/core/v1/event.go new file mode 100644 index 00000000..b087cd8b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/event.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +type EventLister interface { + // List lists all Events in the indexer. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. 
+func (s *eventLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + Get(name string) (*v1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. +type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("event"), name) + } + return obj.(*v1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go new file mode 100644 index 00000000..a96db8dc --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/expansion_generated.go @@ -0,0 +1,111 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ComponentStatusListerExpansion allows custom methods to be added to +// ComponentStatusLister. +type ComponentStatusListerExpansion interface{} + +// ConfigMapListerExpansion allows custom methods to be added to +// ConfigMapLister. +type ConfigMapListerExpansion interface{} + +// ConfigMapNamespaceListerExpansion allows custom methods to be added to +// ConfigMapNamespaceLister. +type ConfigMapNamespaceListerExpansion interface{} + +// EndpointsListerExpansion allows custom methods to be added to +// EndpointsLister. +type EndpointsListerExpansion interface{} + +// EndpointsNamespaceListerExpansion allows custom methods to be added to +// EndpointsNamespaceLister. +type EndpointsNamespaceListerExpansion interface{} + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaceLister. 
+type EventNamespaceListerExpansion interface{} + +// LimitRangeListerExpansion allows custom methods to be added to +// LimitRangeLister. +type LimitRangeListerExpansion interface{} + +// LimitRangeNamespaceListerExpansion allows custom methods to be added to +// LimitRangeNamespaceLister. +type LimitRangeNamespaceListerExpansion interface{} + +// NamespaceListerExpansion allows custom methods to be added to +// NamespaceLister. +type NamespaceListerExpansion interface{} + +// PersistentVolumeListerExpansion allows custom methods to be added to +// PersistentVolumeLister. +type PersistentVolumeListerExpansion interface{} + +// PersistentVolumeClaimListerExpansion allows custom methods to be added to +// PersistentVolumeClaimLister. +type PersistentVolumeClaimListerExpansion interface{} + +// PersistentVolumeClaimNamespaceListerExpansion allows custom methods to be added to +// PersistentVolumeClaimNamespaceLister. +type PersistentVolumeClaimNamespaceListerExpansion interface{} + +// PodListerExpansion allows custom methods to be added to +// PodLister. +type PodListerExpansion interface{} + +// PodNamespaceListerExpansion allows custom methods to be added to +// PodNamespaceLister. +type PodNamespaceListerExpansion interface{} + +// PodTemplateListerExpansion allows custom methods to be added to +// PodTemplateLister. +type PodTemplateListerExpansion interface{} + +// PodTemplateNamespaceListerExpansion allows custom methods to be added to +// PodTemplateNamespaceLister. +type PodTemplateNamespaceListerExpansion interface{} + +// ResourceQuotaListerExpansion allows custom methods to be added to +// ResourceQuotaLister. +type ResourceQuotaListerExpansion interface{} + +// ResourceQuotaNamespaceListerExpansion allows custom methods to be added to +// ResourceQuotaNamespaceLister. +type ResourceQuotaNamespaceListerExpansion interface{} + +// SecretListerExpansion allows custom methods to be added to +// SecretLister. +type SecretListerExpansion interface{} + +// SecretNamespaceListerExpansion allows custom methods to be added to +// SecretNamespaceLister. +type SecretNamespaceListerExpansion interface{} + +// ServiceAccountListerExpansion allows custom methods to be added to +// ServiceAccountLister. +type ServiceAccountListerExpansion interface{} + +// ServiceAccountNamespaceListerExpansion allows custom methods to be added to +// ServiceAccountNamespaceLister. +type ServiceAccountNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/core/v1/limitrange.go b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go new file mode 100644 index 00000000..f1994375 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/limitrange.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// LimitRangeLister helps list LimitRanges. +type LimitRangeLister interface { + // List lists all LimitRanges in the indexer. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // LimitRanges returns an object that can list and get LimitRanges. + LimitRanges(namespace string) LimitRangeNamespaceLister + LimitRangeListerExpansion +} + +// limitRangeLister implements the LimitRangeLister interface. +type limitRangeLister struct { + indexer cache.Indexer +} + +// NewLimitRangeLister returns a new LimitRangeLister. +func NewLimitRangeLister(indexer cache.Indexer) LimitRangeLister { + return &limitRangeLister{indexer: indexer} +} + +// List lists all LimitRanges in the indexer. +func (s *limitRangeLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// LimitRanges returns an object that can list and get LimitRanges. +func (s *limitRangeLister) LimitRanges(namespace string) LimitRangeNamespaceLister { + return limitRangeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// LimitRangeNamespaceLister helps list and get LimitRanges. +type LimitRangeNamespaceLister interface { + // List lists all LimitRanges in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.LimitRange, err error) + // Get retrieves the LimitRange from the indexer for a given namespace and name. + Get(name string) (*v1.LimitRange, error) + LimitRangeNamespaceListerExpansion +} + +// limitRangeNamespaceLister implements the LimitRangeNamespaceLister +// interface. +type limitRangeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all LimitRanges in the indexer for a given namespace. +func (s limitRangeNamespaceLister) List(selector labels.Selector) (ret []*v1.LimitRange, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.LimitRange)) + }) + return ret, err +} + +// Get retrieves the LimitRange from the indexer for a given namespace and name. +func (s limitRangeNamespaceLister) Get(name string) (*v1.LimitRange, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("limitrange"), name) + } + return obj.(*v1.LimitRange), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/namespace.go b/vendor/k8s.io/client-go/listers/core/v1/namespace.go new file mode 100644 index 00000000..21be6878 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/namespace.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NamespaceLister helps list Namespaces. +type NamespaceLister interface { + // List lists all Namespaces in the indexer. + List(selector labels.Selector) (ret []*v1.Namespace, err error) + // Get retrieves the Namespace from the index for a given name. + Get(name string) (*v1.Namespace, error) + NamespaceListerExpansion +} + +// namespaceLister implements the NamespaceLister interface. +type namespaceLister struct { + indexer cache.Indexer +} + +// NewNamespaceLister returns a new NamespaceLister. +func NewNamespaceLister(indexer cache.Indexer) NamespaceLister { + return &namespaceLister{indexer: indexer} +} + +// List lists all Namespaces in the indexer. +func (s *namespaceLister) List(selector labels.Selector) (ret []*v1.Namespace, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Namespace)) + }) + return ret, err +} + +// Get retrieves the Namespace from the index for a given name. +func (s *namespaceLister) Get(name string) (*v1.Namespace, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("namespace"), name) + } + return obj.(*v1.Namespace), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node.go b/vendor/k8s.io/client-go/listers/core/v1/node.go new file mode 100644 index 00000000..d43a682c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NodeLister helps list Nodes. +type NodeLister interface { + // List lists all Nodes in the indexer. + List(selector labels.Selector) (ret []*v1.Node, err error) + // Get retrieves the Node from the index for a given name. + Get(name string) (*v1.Node, error) + NodeListerExpansion +} + +// nodeLister implements the NodeLister interface. +type nodeLister struct { + indexer cache.Indexer +} + +// NewNodeLister returns a new NodeLister. +func NewNodeLister(indexer cache.Indexer) NodeLister { + return &nodeLister{indexer: indexer} +} + +// List lists all Nodes in the indexer. +func (s *nodeLister) List(selector labels.Selector) (ret []*v1.Node, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Node)) + }) + return ret, err +} + +// Get retrieves the Node from the index for a given name. 
+func (s *nodeLister) Get(name string) (*v1.Node, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("node"), name) + } + return obj.(*v1.Node), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go new file mode 100644 index 00000000..9e5c55ab --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/node_expansion.go @@ -0,0 +1,48 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// NodeConditionPredicate is a function that indicates whether the given node's conditions meet +// some set of criteria defined by the function. +type NodeConditionPredicate func(node *v1.Node) bool + +// NodeListerExpansion allows custom methods to be added to +// NodeLister. +type NodeListerExpansion interface { + ListWithPredicate(predicate NodeConditionPredicate) ([]*v1.Node, error) +} + +func (l *nodeLister) ListWithPredicate(predicate NodeConditionPredicate) ([]*v1.Node, error) { + nodes, err := l.List(labels.Everything()) + if err != nil { + return nil, err + } + + var filtered []*v1.Node + for i := range nodes { + if predicate(nodes[i]) { + filtered = append(filtered, nodes[i]) + } + } + + return filtered, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go new file mode 100644 index 00000000..593ba14e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolume.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeLister helps list PersistentVolumes. +type PersistentVolumeLister interface { + // List lists all PersistentVolumes in the indexer. + List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) + // Get retrieves the PersistentVolume from the index for a given name. + Get(name string) (*v1.PersistentVolume, error) + PersistentVolumeListerExpansion +} + +// persistentVolumeLister implements the PersistentVolumeLister interface. 
+type persistentVolumeLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeLister returns a new PersistentVolumeLister. +func NewPersistentVolumeLister(indexer cache.Indexer) PersistentVolumeLister { + return &persistentVolumeLister{indexer: indexer} +} + +// List lists all PersistentVolumes in the indexer. +func (s *persistentVolumeLister) List(selector labels.Selector) (ret []*v1.PersistentVolume, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolume)) + }) + return ret, err +} + +// Get retrieves the PersistentVolume from the index for a given name. +func (s *persistentVolumeLister) Get(name string) (*v1.PersistentVolume, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("persistentvolume"), name) + } + return obj.(*v1.PersistentVolume), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go new file mode 100644 index 00000000..72ddac93 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/persistentvolumeclaim.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PersistentVolumeClaimLister helps list PersistentVolumeClaims. +type PersistentVolumeClaimLister interface { + // List lists all PersistentVolumeClaims in the indexer. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. + PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister + PersistentVolumeClaimListerExpansion +} + +// persistentVolumeClaimLister implements the PersistentVolumeClaimLister interface. +type persistentVolumeClaimLister struct { + indexer cache.Indexer +} + +// NewPersistentVolumeClaimLister returns a new PersistentVolumeClaimLister. +func NewPersistentVolumeClaimLister(indexer cache.Indexer) PersistentVolumeClaimLister { + return &persistentVolumeClaimLister{indexer: indexer} +} + +// List lists all PersistentVolumeClaims in the indexer. +func (s *persistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// PersistentVolumeClaims returns an object that can list and get PersistentVolumeClaims. 
+func (s *persistentVolumeClaimLister) PersistentVolumeClaims(namespace string) PersistentVolumeClaimNamespaceLister { + return persistentVolumeClaimNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PersistentVolumeClaimNamespaceLister helps list and get PersistentVolumeClaims. +type PersistentVolumeClaimNamespaceLister interface { + // List lists all PersistentVolumeClaims in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) + // Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. + Get(name string) (*v1.PersistentVolumeClaim, error) + PersistentVolumeClaimNamespaceListerExpansion +} + +// persistentVolumeClaimNamespaceLister implements the PersistentVolumeClaimNamespaceLister +// interface. +type persistentVolumeClaimNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PersistentVolumeClaims in the indexer for a given namespace. +func (s persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PersistentVolumeClaim)) + }) + return ret, err +} + +// Get retrieves the PersistentVolumeClaim from the indexer for a given namespace and name. +func (s persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("persistentvolumeclaim"), name) + } + return obj.(*v1.PersistentVolumeClaim), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/pod.go b/vendor/k8s.io/client-go/listers/core/v1/pod.go new file mode 100644 index 00000000..6cf4a842 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/pod.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodLister helps list Pods. +type PodLister interface { + // List lists all Pods in the indexer. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Pods returns an object that can list and get Pods. + Pods(namespace string) PodNamespaceLister + PodListerExpansion +} + +// podLister implements the PodLister interface. +type podLister struct { + indexer cache.Indexer +} + +// NewPodLister returns a new PodLister. +func NewPodLister(indexer cache.Indexer) PodLister { + return &podLister{indexer: indexer} +} + +// List lists all Pods in the indexer. 
+func (s *podLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Pods returns an object that can list and get Pods. +func (s *podLister) Pods(namespace string) PodNamespaceLister { + return podNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodNamespaceLister helps list and get Pods. +type PodNamespaceLister interface { + // List lists all Pods in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Pod, err error) + // Get retrieves the Pod from the indexer for a given namespace and name. + Get(name string) (*v1.Pod, error) + PodNamespaceListerExpansion +} + +// podNamespaceLister implements the PodNamespaceLister +// interface. +type podNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Pods in the indexer for a given namespace. +func (s podNamespaceLister) List(selector labels.Selector) (ret []*v1.Pod, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Pod)) + }) + return ret, err +} + +// Get retrieves the Pod from the indexer for a given namespace and name. +func (s podNamespaceLister) Get(name string) (*v1.Pod, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("pod"), name) + } + return obj.(*v1.Pod), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go new file mode 100644 index 00000000..d825c747 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/podtemplate.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodTemplateLister helps list PodTemplates. +type PodTemplateLister interface { + // List lists all PodTemplates in the indexer. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // PodTemplates returns an object that can list and get PodTemplates. + PodTemplates(namespace string) PodTemplateNamespaceLister + PodTemplateListerExpansion +} + +// podTemplateLister implements the PodTemplateLister interface. +type podTemplateLister struct { + indexer cache.Indexer +} + +// NewPodTemplateLister returns a new PodTemplateLister. +func NewPodTemplateLister(indexer cache.Indexer) PodTemplateLister { + return &podTemplateLister{indexer: indexer} +} + +// List lists all PodTemplates in the indexer. 
+func (s *podTemplateLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// PodTemplates returns an object that can list and get PodTemplates. +func (s *podTemplateLister) PodTemplates(namespace string) PodTemplateNamespaceLister { + return podTemplateNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodTemplateNamespaceLister helps list and get PodTemplates. +type PodTemplateNamespaceLister interface { + // List lists all PodTemplates in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.PodTemplate, err error) + // Get retrieves the PodTemplate from the indexer for a given namespace and name. + Get(name string) (*v1.PodTemplate, error) + PodTemplateNamespaceListerExpansion +} + +// podTemplateNamespaceLister implements the PodTemplateNamespaceLister +// interface. +type podTemplateNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodTemplates in the indexer for a given namespace. +func (s podTemplateNamespaceLister) List(selector labels.Selector) (ret []*v1.PodTemplate, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.PodTemplate)) + }) + return ret, err +} + +// Get retrieves the PodTemplate from the indexer for a given namespace and name. +func (s podTemplateNamespaceLister) Get(name string) (*v1.PodTemplate, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("podtemplate"), name) + } + return obj.(*v1.PodTemplate), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go new file mode 100644 index 00000000..6670a9d9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicationControllerLister helps list ReplicationControllers. +type ReplicationControllerLister interface { + // List lists all ReplicationControllers in the indexer. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // ReplicationControllers returns an object that can list and get ReplicationControllers. + ReplicationControllers(namespace string) ReplicationControllerNamespaceLister + ReplicationControllerListerExpansion +} + +// replicationControllerLister implements the ReplicationControllerLister interface. 
+type replicationControllerLister struct { + indexer cache.Indexer +} + +// NewReplicationControllerLister returns a new ReplicationControllerLister. +func NewReplicationControllerLister(indexer cache.Indexer) ReplicationControllerLister { + return &replicationControllerLister{indexer: indexer} +} + +// List lists all ReplicationControllers in the indexer. +func (s *replicationControllerLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// ReplicationControllers returns an object that can list and get ReplicationControllers. +func (s *replicationControllerLister) ReplicationControllers(namespace string) ReplicationControllerNamespaceLister { + return replicationControllerNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicationControllerNamespaceLister helps list and get ReplicationControllers. +type ReplicationControllerNamespaceLister interface { + // List lists all ReplicationControllers in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ReplicationController, err error) + // Get retrieves the ReplicationController from the indexer for a given namespace and name. + Get(name string) (*v1.ReplicationController, error) + ReplicationControllerNamespaceListerExpansion +} + +// replicationControllerNamespaceLister implements the ReplicationControllerNamespaceLister +// interface. +type replicationControllerNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicationControllers in the indexer for a given namespace. +func (s replicationControllerNamespaceLister) List(selector labels.Selector) (ret []*v1.ReplicationController, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ReplicationController)) + }) + return ret, err +} + +// Get retrieves the ReplicationController from the indexer for a given namespace and name. +func (s replicationControllerNamespaceLister) Get(name string) (*v1.ReplicationController, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("replicationcontroller"), name) + } + return obj.(*v1.ReplicationController), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go new file mode 100644 index 00000000..b031d521 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/replicationcontroller_expansion.go @@ -0,0 +1,66 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "fmt" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicationControllerListerExpansion allows custom methods to be added to +// ReplicationControllerLister. +type ReplicationControllerListerExpansion interface { + GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) +} + +// ReplicationControllerNamespaceListerExpansion allows custom methods to be added to +// ReplicationControllerNamespaceLister. +type ReplicationControllerNamespaceListerExpansion interface{} + +// GetPodControllers returns a list of ReplicationControllers that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicationControllers are found. +func (s *replicationControllerLister) GetPodControllers(pod *v1.Pod) ([]*v1.ReplicationController, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no controllers found for pod %v because it has no labels", pod.Name) + } + + items, err := s.ReplicationControllers(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var controllers []*v1.ReplicationController + for i := range items { + rc := items[i] + selector := labels.Set(rc.Spec.Selector).AsSelectorPreValidated() + + // If an rc with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + controllers = append(controllers, rc) + } + + if len(controllers) == 0 { + return nil, fmt.Errorf("could not find controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return controllers, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go new file mode 100644 index 00000000..713a4151 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/resourcequota.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ResourceQuotaLister helps list ResourceQuotas. +type ResourceQuotaLister interface { + // List lists all ResourceQuotas in the indexer. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // ResourceQuotas returns an object that can list and get ResourceQuotas. + ResourceQuotas(namespace string) ResourceQuotaNamespaceLister + ResourceQuotaListerExpansion +} + +// resourceQuotaLister implements the ResourceQuotaLister interface. +type resourceQuotaLister struct { + indexer cache.Indexer +} + +// NewResourceQuotaLister returns a new ResourceQuotaLister. 
+func NewResourceQuotaLister(indexer cache.Indexer) ResourceQuotaLister { + return &resourceQuotaLister{indexer: indexer} +} + +// List lists all ResourceQuotas in the indexer. +func (s *resourceQuotaLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// ResourceQuotas returns an object that can list and get ResourceQuotas. +func (s *resourceQuotaLister) ResourceQuotas(namespace string) ResourceQuotaNamespaceLister { + return resourceQuotaNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ResourceQuotaNamespaceLister helps list and get ResourceQuotas. +type ResourceQuotaNamespaceLister interface { + // List lists all ResourceQuotas in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) + // Get retrieves the ResourceQuota from the indexer for a given namespace and name. + Get(name string) (*v1.ResourceQuota, error) + ResourceQuotaNamespaceListerExpansion +} + +// resourceQuotaNamespaceLister implements the ResourceQuotaNamespaceLister +// interface. +type resourceQuotaNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ResourceQuotas in the indexer for a given namespace. +func (s resourceQuotaNamespaceLister) List(selector labels.Selector) (ret []*v1.ResourceQuota, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ResourceQuota)) + }) + return ret, err +} + +// Get retrieves the ResourceQuota from the indexer for a given namespace and name. +func (s resourceQuotaNamespaceLister) Get(name string) (*v1.ResourceQuota, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("resourcequota"), name) + } + return obj.(*v1.ResourceQuota), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/secret.go b/vendor/k8s.io/client-go/listers/core/v1/secret.go new file mode 100644 index 00000000..26ef13d9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/secret.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// SecretLister helps list Secrets. +type SecretLister interface { + // List lists all Secrets in the indexer. + List(selector labels.Selector) (ret []*v1.Secret, err error) + // Secrets returns an object that can list and get Secrets. + Secrets(namespace string) SecretNamespaceLister + SecretListerExpansion +} + +// secretLister implements the SecretLister interface. 
+type secretLister struct { + indexer cache.Indexer +} + +// NewSecretLister returns a new SecretLister. +func NewSecretLister(indexer cache.Indexer) SecretLister { + return &secretLister{indexer: indexer} +} + +// List lists all Secrets in the indexer. +func (s *secretLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Secrets returns an object that can list and get Secrets. +func (s *secretLister) Secrets(namespace string) SecretNamespaceLister { + return secretNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// SecretNamespaceLister helps list and get Secrets. +type SecretNamespaceLister interface { + // List lists all Secrets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Secret, err error) + // Get retrieves the Secret from the indexer for a given namespace and name. + Get(name string) (*v1.Secret, error) + SecretNamespaceListerExpansion +} + +// secretNamespaceLister implements the SecretNamespaceLister +// interface. +type secretNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Secrets in the indexer for a given namespace. +func (s secretNamespaceLister) List(selector labels.Selector) (ret []*v1.Secret, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Secret)) + }) + return ret, err +} + +// Get retrieves the Secret from the indexer for a given namespace and name. +func (s secretNamespaceLister) Get(name string) (*v1.Secret, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("secret"), name) + } + return obj.(*v1.Secret), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service.go b/vendor/k8s.io/client-go/listers/core/v1/service.go new file mode 100644 index 00000000..895a6922 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceLister helps list Services. +type ServiceLister interface { + // List lists all Services in the indexer. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Services returns an object that can list and get Services. + Services(namespace string) ServiceNamespaceLister + ServiceListerExpansion +} + +// serviceLister implements the ServiceLister interface. +type serviceLister struct { + indexer cache.Indexer +} + +// NewServiceLister returns a new ServiceLister. 
+func NewServiceLister(indexer cache.Indexer) ServiceLister { + return &serviceLister{indexer: indexer} +} + +// List lists all Services in the indexer. +func (s *serviceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Services returns an object that can list and get Services. +func (s *serviceLister) Services(namespace string) ServiceNamespaceLister { + return serviceNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceNamespaceLister helps list and get Services. +type ServiceNamespaceLister interface { + // List lists all Services in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Service, err error) + // Get retrieves the Service from the indexer for a given namespace and name. + Get(name string) (*v1.Service, error) + ServiceNamespaceListerExpansion +} + +// serviceNamespaceLister implements the ServiceNamespaceLister +// interface. +type serviceNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Services in the indexer for a given namespace. +func (s serviceNamespaceLister) List(selector labels.Selector) (ret []*v1.Service, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Service)) + }) + return ret, err +} + +// Get retrieves the Service from the indexer for a given namespace and name. +func (s serviceNamespaceLister) Get(name string) (*v1.Service, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("service"), name) + } + return obj.(*v1.Service), nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go b/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go new file mode 100644 index 00000000..e283d250 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/service_expansion.go @@ -0,0 +1,56 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ServiceListerExpansion allows custom methods to be added to +// ServiceLister. +type ServiceListerExpansion interface { + GetPodServices(pod *v1.Pod) ([]*v1.Service, error) +} + +// ServiceNamespaceListerExpansion allows custom methods to be added to +// ServiceNamespaceLister. +type ServiceNamespaceListerExpansion interface{} + +// TODO: Move this back to scheduler as a helper function that takes a Store, +// rather than a method of ServiceLister. 
+func (s *serviceLister) GetPodServices(pod *v1.Pod) ([]*v1.Service, error) { + allServices, err := s.Services(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var services []*v1.Service + for i := range allServices { + service := allServices[i] + if service.Spec.Selector == nil { + // services with nil selectors match nothing, not everything. + continue + } + selector := labels.Set(service.Spec.Selector).AsSelectorPreValidated() + if selector.Matches(labels.Set(pod.Labels)) { + services = append(services, service) + } + } + + return services, nil +} diff --git a/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go new file mode 100644 index 00000000..2245d5d4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/core/v1/serviceaccount.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ServiceAccountLister helps list ServiceAccounts. +type ServiceAccountLister interface { + // List lists all ServiceAccounts in the indexer. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // ServiceAccounts returns an object that can list and get ServiceAccounts. + ServiceAccounts(namespace string) ServiceAccountNamespaceLister + ServiceAccountListerExpansion +} + +// serviceAccountLister implements the ServiceAccountLister interface. +type serviceAccountLister struct { + indexer cache.Indexer +} + +// NewServiceAccountLister returns a new ServiceAccountLister. +func NewServiceAccountLister(indexer cache.Indexer) ServiceAccountLister { + return &serviceAccountLister{indexer: indexer} +} + +// List lists all ServiceAccounts in the indexer. +func (s *serviceAccountLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// ServiceAccounts returns an object that can list and get ServiceAccounts. +func (s *serviceAccountLister) ServiceAccounts(namespace string) ServiceAccountNamespaceLister { + return serviceAccountNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ServiceAccountNamespaceLister helps list and get ServiceAccounts. +type ServiceAccountNamespaceLister interface { + // List lists all ServiceAccounts in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) + // Get retrieves the ServiceAccount from the indexer for a given namespace and name. + Get(name string) (*v1.ServiceAccount, error) + ServiceAccountNamespaceListerExpansion +} + +// serviceAccountNamespaceLister implements the ServiceAccountNamespaceLister +// interface. 
+type serviceAccountNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ServiceAccounts in the indexer for a given namespace. +func (s serviceAccountNamespaceLister) List(selector labels.Selector) (ret []*v1.ServiceAccount, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ServiceAccount)) + }) + return ret, err +} + +// Get retrieves the ServiceAccount from the indexer for a given namespace and name. +func (s serviceAccountNamespaceLister) Get(name string) (*v1.ServiceAccount, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("serviceaccount"), name) + } + return obj.(*v1.ServiceAccount), nil +} diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/event.go b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go new file mode 100644 index 00000000..bca3c452 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/event.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EventLister helps list Events. +type EventLister interface { + // List lists all Events in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Event, err error) + // Events returns an object that can list and get Events. + Events(namespace string) EventNamespaceLister + EventListerExpansion +} + +// eventLister implements the EventLister interface. +type eventLister struct { + indexer cache.Indexer +} + +// NewEventLister returns a new EventLister. +func NewEventLister(indexer cache.Indexer) EventLister { + return &eventLister{indexer: indexer} +} + +// List lists all Events in the indexer. +func (s *eventLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Event)) + }) + return ret, err +} + +// Events returns an object that can list and get Events. +func (s *eventLister) Events(namespace string) EventNamespaceLister { + return eventNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EventNamespaceLister helps list and get Events. +type EventNamespaceLister interface { + // List lists all Events in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Event, err error) + // Get retrieves the Event from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Event, error) + EventNamespaceListerExpansion +} + +// eventNamespaceLister implements the EventNamespaceLister +// interface. 
+type eventNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Events in the indexer for a given namespace. +func (s eventNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Event, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Event)) + }) + return ret, err +} + +// Get retrieves the Event from the indexer for a given namespace and name. +func (s eventNamespaceLister) Get(name string) (*v1beta1.Event, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("event"), name) + } + return obj.(*v1beta1.Event), nil +} diff --git a/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go new file mode 100644 index 00000000..7e8fb62b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/events/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// EventListerExpansion allows custom methods to be added to +// EventLister. +type EventListerExpansion interface{} + +// EventNamespaceListerExpansion allows custom methods to be added to +// EventNamespaceLister. +type EventNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go new file mode 100644 index 00000000..4672a5cb --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DaemonSetLister helps list DaemonSets. +type DaemonSetLister interface { + // List lists all DaemonSets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + // DaemonSets returns an object that can list and get DaemonSets. + DaemonSets(namespace string) DaemonSetNamespaceLister + DaemonSetListerExpansion +} + +// daemonSetLister implements the DaemonSetLister interface. 
+type daemonSetLister struct { + indexer cache.Indexer +} + +// NewDaemonSetLister returns a new DaemonSetLister. +func NewDaemonSetLister(indexer cache.Indexer) DaemonSetLister { + return &daemonSetLister{indexer: indexer} +} + +// List lists all DaemonSets in the indexer. +func (s *daemonSetLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.DaemonSet)) + }) + return ret, err +} + +// DaemonSets returns an object that can list and get DaemonSets. +func (s *daemonSetLister) DaemonSets(namespace string) DaemonSetNamespaceLister { + return daemonSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DaemonSetNamespaceLister helps list and get DaemonSets. +type DaemonSetNamespaceLister interface { + // List lists all DaemonSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) + // Get retrieves the DaemonSet from the indexer for a given namespace and name. + Get(name string) (*v1beta1.DaemonSet, error) + DaemonSetNamespaceListerExpansion +} + +// daemonSetNamespaceLister implements the DaemonSetNamespaceLister +// interface. +type daemonSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all DaemonSets in the indexer for a given namespace. +func (s daemonSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.DaemonSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.DaemonSet)) + }) + return ret, err +} + +// Get retrieves the DaemonSet from the indexer for a given namespace and name. +func (s daemonSetNamespaceLister) Get(name string) (*v1beta1.DaemonSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("daemonset"), name) + } + return obj.(*v1beta1.DaemonSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go new file mode 100644 index 00000000..336a4ed8 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/daemonset_expansion.go @@ -0,0 +1,114 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + apps "k8s.io/api/apps/v1beta1" + "k8s.io/api/core/v1" + "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DaemonSetListerExpansion allows custom methods to be added to +// DaemonSetLister. 
+type DaemonSetListerExpansion interface { + GetPodDaemonSets(pod *v1.Pod) ([]*v1beta1.DaemonSet, error) + GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*v1beta1.DaemonSet, error) +} + +// DaemonSetNamespaceListerExpansion allows custom methods to be added to +// DaemonSetNamespaceLister. +type DaemonSetNamespaceListerExpansion interface{} + +// GetPodDaemonSets returns a list of DaemonSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetPodDaemonSets(pod *v1.Pod) ([]*v1beta1.DaemonSet, error) { + var selector labels.Selector + var daemonSet *v1beta1.DaemonSet + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no daemon sets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.DaemonSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*v1beta1.DaemonSet + for i := range list { + daemonSet = list[i] + if daemonSet.Namespace != pod.Namespace { + continue + } + selector, err = metav1.LabelSelectorAsSelector(daemonSet.Spec.Selector) + if err != nil { + // this should not happen if the DaemonSet passed validation + return nil, err + } + + // If a daemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + daemonSets = append(daemonSets, daemonSet) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find daemon set for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return daemonSets, nil +} + +// GetHistoryDaemonSets returns a list of DaemonSets that potentially +// match a ControllerRevision. Only the one specified in the ControllerRevision's ControllerRef +// will actually manage it. +// Returns an error only if no matching DaemonSets are found. +func (s *daemonSetLister) GetHistoryDaemonSets(history *apps.ControllerRevision) ([]*v1beta1.DaemonSet, error) { + if len(history.Labels) == 0 { + return nil, fmt.Errorf("no DaemonSet found for ControllerRevision %s because it has no labels", history.Name) + } + + list, err := s.DaemonSets(history.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var daemonSets []*v1beta1.DaemonSet + for _, ds := range list { + selector, err := metav1.LabelSelectorAsSelector(ds.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a DaemonSet with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(history.Labels)) { + continue + } + daemonSets = append(daemonSets, ds) + } + + if len(daemonSets) == 0 { + return nil, fmt.Errorf("could not find DaemonSets for ControllerRevision %s in namespace %s with labels: %v", history.Name, history.Namespace, history.Labels) + } + + return daemonSets, nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go new file mode 100644 index 00000000..4c17085d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// DeploymentLister helps list Deployments. +type DeploymentLister interface { + // List lists all Deployments in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Deployments returns an object that can list and get Deployments. + Deployments(namespace string) DeploymentNamespaceLister + DeploymentListerExpansion +} + +// deploymentLister implements the DeploymentLister interface. +type deploymentLister struct { + indexer cache.Indexer +} + +// NewDeploymentLister returns a new DeploymentLister. +func NewDeploymentLister(indexer cache.Indexer) DeploymentLister { + return &deploymentLister{indexer: indexer} +} + +// List lists all Deployments in the indexer. +func (s *deploymentLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Deployments returns an object that can list and get Deployments. +func (s *deploymentLister) Deployments(namespace string) DeploymentNamespaceLister { + return deploymentNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// DeploymentNamespaceLister helps list and get Deployments. +type DeploymentNamespaceLister interface { + // List lists all Deployments in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) + // Get retrieves the Deployment from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Deployment, error) + DeploymentNamespaceListerExpansion +} + +// deploymentNamespaceLister implements the DeploymentNamespaceLister +// interface. +type deploymentNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Deployments in the indexer for a given namespace. +func (s deploymentNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Deployment, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Deployment)) + }) + return ret, err +} + +// Get retrieves the Deployment from the indexer for a given namespace and name. +func (s deploymentNamespaceLister) Get(name string) (*v1beta1.Deployment, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("deployment"), name) + } + return obj.(*v1beta1.Deployment), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 00000000..b9a14167 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// DeploymentListerExpansion allows custom methods to be added to +// DeploymentLister. +type DeploymentListerExpansion interface { + GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) ([]*extensions.Deployment, error) +} + +// DeploymentNamespaceListerExpansion allows custom methods to be added to +// DeploymentNamespaceLister. +type DeploymentNamespaceListerExpansion interface{} + +// GetDeploymentsForReplicaSet returns a list of Deployments that potentially +// match a ReplicaSet. Only the one specified in the ReplicaSet's ControllerRef +// will actually manage it. +// Returns an error only if no matching Deployments are found. +func (s *deploymentLister) GetDeploymentsForReplicaSet(rs *extensions.ReplicaSet) ([]*extensions.Deployment, error) { + if len(rs.Labels) == 0 { + return nil, fmt.Errorf("no deployments found for ReplicaSet %v because it has no labels", rs.Name) + } + + // TODO: MODIFY THIS METHOD so that it checks for the podTemplateSpecHash label + dList, err := s.Deployments(rs.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var deployments []*extensions.Deployment + for _, d := range dList { + selector, err := metav1.LabelSelectorAsSelector(d.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid label selector: %v", err) + } + // If a deployment with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(rs.Labels)) { + continue + } + deployments = append(deployments, d) + } + + if len(deployments) == 0 { + return nil, fmt.Errorf("could not find deployments set for ReplicaSet %s in namespace %s with labels: %v", rs.Name, rs.Namespace, rs.Labels) + } + + return deployments, nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go new file mode 100644 index 00000000..060c7a35 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/expansion_generated.go @@ -0,0 +1,39 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// IngressListerExpansion allows custom methods to be added to +// IngressLister. 
+type IngressListerExpansion interface{} + +// IngressNamespaceListerExpansion allows custom methods to be added to +// IngressNamespaceLister. +type IngressNamespaceListerExpansion interface{} + +// PodSecurityPolicyListerExpansion allows custom methods to be added to +// PodSecurityPolicyLister. +type PodSecurityPolicyListerExpansion interface{} + +// ScaleListerExpansion allows custom methods to be added to +// ScaleLister. +type ScaleListerExpansion interface{} + +// ScaleNamespaceListerExpansion allows custom methods to be added to +// ScaleNamespaceLister. +type ScaleNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go new file mode 100644 index 00000000..5615dfcc --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/ingress.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// IngressLister helps list Ingresses. +type IngressLister interface { + // List lists all Ingresses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + // Ingresses returns an object that can list and get Ingresses. + Ingresses(namespace string) IngressNamespaceLister + IngressListerExpansion +} + +// ingressLister implements the IngressLister interface. +type ingressLister struct { + indexer cache.Indexer +} + +// NewIngressLister returns a new IngressLister. +func NewIngressLister(indexer cache.Indexer) IngressLister { + return &ingressLister{indexer: indexer} +} + +// List lists all Ingresses in the indexer. +func (s *ingressLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Ingress)) + }) + return ret, err +} + +// Ingresses returns an object that can list and get Ingresses. +func (s *ingressLister) Ingresses(namespace string) IngressNamespaceLister { + return ingressNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// IngressNamespaceLister helps list and get Ingresses. +type IngressNamespaceLister interface { + // List lists all Ingresses in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) + // Get retrieves the Ingress from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Ingress, error) + IngressNamespaceListerExpansion +} + +// ingressNamespaceLister implements the IngressNamespaceLister +// interface. +type ingressNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Ingresses in the indexer for a given namespace. 
+func (s ingressNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Ingress, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Ingress)) + }) + return ret, err +} + +// Get retrieves the Ingress from the indexer for a given namespace and name. +func (s ingressNamespaceLister) Get(name string) (*v1beta1.Ingress, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("ingress"), name) + } + return obj.(*v1beta1.Ingress), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 00000000..3189ff7c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodSecurityPolicyLister helps list PodSecurityPolicies. +type PodSecurityPolicyLister interface { + // List lists all PodSecurityPolicies in the indexer. + List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) + // Get retrieves the PodSecurityPolicy from the index for a given name. + Get(name string) (*v1beta1.PodSecurityPolicy, error) + PodSecurityPolicyListerExpansion +} + +// podSecurityPolicyLister implements the PodSecurityPolicyLister interface. +type podSecurityPolicyLister struct { + indexer cache.Indexer +} + +// NewPodSecurityPolicyLister returns a new PodSecurityPolicyLister. +func NewPodSecurityPolicyLister(indexer cache.Indexer) PodSecurityPolicyLister { + return &podSecurityPolicyLister{indexer: indexer} +} + +// List lists all PodSecurityPolicies in the indexer. +func (s *podSecurityPolicyLister) List(selector labels.Selector) (ret []*v1beta1.PodSecurityPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodSecurityPolicy)) + }) + return ret, err +} + +// Get retrieves the PodSecurityPolicy from the index for a given name. 
+func (s *podSecurityPolicyLister) Get(name string) (*v1beta1.PodSecurityPolicy, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("podsecuritypolicy"), name) + } + return obj.(*v1beta1.PodSecurityPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go new file mode 100644 index 00000000..44de996e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ReplicaSetLister helps list ReplicaSets. +type ReplicaSetLister interface { + // List lists all ReplicaSets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + // ReplicaSets returns an object that can list and get ReplicaSets. + ReplicaSets(namespace string) ReplicaSetNamespaceLister + ReplicaSetListerExpansion +} + +// replicaSetLister implements the ReplicaSetLister interface. +type replicaSetLister struct { + indexer cache.Indexer +} + +// NewReplicaSetLister returns a new ReplicaSetLister. +func NewReplicaSetLister(indexer cache.Indexer) ReplicaSetLister { + return &replicaSetLister{indexer: indexer} +} + +// List lists all ReplicaSets in the indexer. +func (s *replicaSetLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ReplicaSet)) + }) + return ret, err +} + +// ReplicaSets returns an object that can list and get ReplicaSets. +func (s *replicaSetLister) ReplicaSets(namespace string) ReplicaSetNamespaceLister { + return replicaSetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ReplicaSetNamespaceLister helps list and get ReplicaSets. +type ReplicaSetNamespaceLister interface { + // List lists all ReplicaSets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) + // Get retrieves the ReplicaSet from the indexer for a given namespace and name. + Get(name string) (*v1beta1.ReplicaSet, error) + ReplicaSetNamespaceListerExpansion +} + +// replicaSetNamespaceLister implements the ReplicaSetNamespaceLister +// interface. +type replicaSetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all ReplicaSets in the indexer for a given namespace. 
+func (s replicaSetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.ReplicaSet, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ReplicaSet)) + }) + return ret, err +} + +// Get retrieves the ReplicaSet from the indexer for a given namespace and name. +func (s replicaSetNamespaceLister) Get(name string) (*v1beta1.ReplicaSet, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("replicaset"), name) + } + return obj.(*v1beta1.ReplicaSet), nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go new file mode 100644 index 00000000..1f72644c --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/replicaset_expansion.go @@ -0,0 +1,73 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "k8s.io/api/core/v1" + extensions "k8s.io/api/extensions/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// ReplicaSetListerExpansion allows custom methods to be added to +// ReplicaSetLister. +type ReplicaSetListerExpansion interface { + GetPodReplicaSets(pod *v1.Pod) ([]*extensions.ReplicaSet, error) +} + +// ReplicaSetNamespaceListerExpansion allows custom methods to be added to +// ReplicaSetNamespaceLister. +type ReplicaSetNamespaceListerExpansion interface{} + +// GetPodReplicaSets returns a list of ReplicaSets that potentially match a pod. +// Only the one specified in the Pod's ControllerRef will actually manage it. +// Returns an error only if no matching ReplicaSets are found. +func (s *replicaSetLister) GetPodReplicaSets(pod *v1.Pod) ([]*extensions.ReplicaSet, error) { + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no ReplicaSets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.ReplicaSets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var rss []*extensions.ReplicaSet + for _, rs := range list { + if rs.Namespace != pod.Namespace { + continue + } + selector, err := metav1.LabelSelectorAsSelector(rs.Spec.Selector) + if err != nil { + return nil, fmt.Errorf("invalid selector: %v", err) + } + + // If a ReplicaSet with a nil or empty selector creeps in, it should match nothing, not everything. 
+ if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + rss = append(rss, rs) + } + + if len(rss) == 0 { + return nil, fmt.Errorf("could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return rss, nil +} diff --git a/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go new file mode 100644 index 00000000..a027af82 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/extensions/v1beta1/scale.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ScaleLister helps list Scales. +type ScaleLister interface { + // List lists all Scales in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Scales returns an object that can list and get Scales. + Scales(namespace string) ScaleNamespaceLister + ScaleListerExpansion +} + +// scaleLister implements the ScaleLister interface. +type scaleLister struct { + indexer cache.Indexer +} + +// NewScaleLister returns a new ScaleLister. +func NewScaleLister(indexer cache.Indexer) ScaleLister { + return &scaleLister{indexer: indexer} +} + +// List lists all Scales in the indexer. +func (s *scaleLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Scales returns an object that can list and get Scales. +func (s *scaleLister) Scales(namespace string) ScaleNamespaceLister { + return scaleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// ScaleNamespaceLister helps list and get Scales. +type ScaleNamespaceLister interface { + // List lists all Scales in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Scale, err error) + // Get retrieves the Scale from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Scale, error) + ScaleNamespaceListerExpansion +} + +// scaleNamespaceLister implements the ScaleNamespaceLister +// interface. +type scaleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Scales in the indexer for a given namespace. +func (s scaleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Scale, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Scale)) + }) + return ret, err +} + +// Get retrieves the Scale from the indexer for a given namespace and name. 
+func (s scaleNamespaceLister) Get(name string) (*v1beta1.Scale, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("scale"), name) + } + return obj.(*v1beta1.Scale), nil +} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go new file mode 100644 index 00000000..91fe5e77 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// NetworkPolicyListerExpansion allows custom methods to be added to +// NetworkPolicyLister. +type NetworkPolicyListerExpansion interface{} + +// NetworkPolicyNamespaceListerExpansion allows custom methods to be added to +// NetworkPolicyNamespaceLister. +type NetworkPolicyNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go new file mode 100644 index 00000000..59e17eec --- /dev/null +++ b/vendor/k8s.io/client-go/listers/networking/v1/networkpolicy.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/networking/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// NetworkPolicyLister helps list NetworkPolicies. +type NetworkPolicyLister interface { + // List lists all NetworkPolicies in the indexer. + List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + // NetworkPolicies returns an object that can list and get NetworkPolicies. + NetworkPolicies(namespace string) NetworkPolicyNamespaceLister + NetworkPolicyListerExpansion +} + +// networkPolicyLister implements the NetworkPolicyLister interface. +type networkPolicyLister struct { + indexer cache.Indexer +} + +// NewNetworkPolicyLister returns a new NetworkPolicyLister. +func NewNetworkPolicyLister(indexer cache.Indexer) NetworkPolicyLister { + return &networkPolicyLister{indexer: indexer} +} + +// List lists all NetworkPolicies in the indexer. 
+func (s *networkPolicyLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.NetworkPolicy)) + }) + return ret, err +} + +// NetworkPolicies returns an object that can list and get NetworkPolicies. +func (s *networkPolicyLister) NetworkPolicies(namespace string) NetworkPolicyNamespaceLister { + return networkPolicyNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// NetworkPolicyNamespaceLister helps list and get NetworkPolicies. +type NetworkPolicyNamespaceLister interface { + // List lists all NetworkPolicies in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) + // Get retrieves the NetworkPolicy from the indexer for a given namespace and name. + Get(name string) (*v1.NetworkPolicy, error) + NetworkPolicyNamespaceListerExpansion +} + +// networkPolicyNamespaceLister implements the NetworkPolicyNamespaceLister +// interface. +type networkPolicyNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all NetworkPolicies in the indexer for a given namespace. +func (s networkPolicyNamespaceLister) List(selector labels.Selector) (ret []*v1.NetworkPolicy, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.NetworkPolicy)) + }) + return ret, err +} + +// Get retrieves the NetworkPolicy from the indexer for a given namespace and name. +func (s networkPolicyNamespaceLister) Get(name string) (*v1.NetworkPolicy, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("networkpolicy"), name) + } + return obj.(*v1.NetworkPolicy), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go new file mode 100644 index 00000000..742775f6 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/eviction.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// EvictionLister helps list Evictions. +type EvictionLister interface { + // List lists all Evictions in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + // Evictions returns an object that can list and get Evictions. + Evictions(namespace string) EvictionNamespaceLister + EvictionListerExpansion +} + +// evictionLister implements the EvictionLister interface. +type evictionLister struct { + indexer cache.Indexer +} + +// NewEvictionLister returns a new EvictionLister. 
+func NewEvictionLister(indexer cache.Indexer) EvictionLister { + return &evictionLister{indexer: indexer} +} + +// List lists all Evictions in the indexer. +func (s *evictionLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Eviction)) + }) + return ret, err +} + +// Evictions returns an object that can list and get Evictions. +func (s *evictionLister) Evictions(namespace string) EvictionNamespaceLister { + return evictionNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// EvictionNamespaceLister helps list and get Evictions. +type EvictionNamespaceLister interface { + // List lists all Evictions in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) + // Get retrieves the Eviction from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Eviction, error) + EvictionNamespaceListerExpansion +} + +// evictionNamespaceLister implements the EvictionNamespaceLister +// interface. +type evictionNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Evictions in the indexer for a given namespace. +func (s evictionNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Eviction, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Eviction)) + }) + return ret, err +} + +// Get retrieves the Eviction from the indexer for a given namespace and name. +func (s evictionNamespaceLister) Get(name string) (*v1beta1.Eviction, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("eviction"), name) + } + return obj.(*v1beta1.Eviction), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go new file mode 100644 index 00000000..4785fbc0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// EvictionListerExpansion allows custom methods to be added to +// EvictionLister. +type EvictionListerExpansion interface{} + +// EvictionNamespaceListerExpansion allows custom methods to be added to +// EvictionNamespaceLister. +type EvictionNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 00000000..6512f29f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodDisruptionBudgetLister helps list PodDisruptionBudgets. +type PodDisruptionBudgetLister interface { + // List lists all PodDisruptionBudgets in the indexer. + List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + // PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. + PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister + PodDisruptionBudgetListerExpansion +} + +// podDisruptionBudgetLister implements the PodDisruptionBudgetLister interface. +type podDisruptionBudgetLister struct { + indexer cache.Indexer +} + +// NewPodDisruptionBudgetLister returns a new PodDisruptionBudgetLister. +func NewPodDisruptionBudgetLister(indexer cache.Indexer) PodDisruptionBudgetLister { + return &podDisruptionBudgetLister{indexer: indexer} +} + +// List lists all PodDisruptionBudgets in the indexer. +func (s *podDisruptionBudgetLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) + }) + return ret, err +} + +// PodDisruptionBudgets returns an object that can list and get PodDisruptionBudgets. +func (s *podDisruptionBudgetLister) PodDisruptionBudgets(namespace string) PodDisruptionBudgetNamespaceLister { + return podDisruptionBudgetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodDisruptionBudgetNamespaceLister helps list and get PodDisruptionBudgets. +type PodDisruptionBudgetNamespaceLister interface { + // List lists all PodDisruptionBudgets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) + // Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. + Get(name string) (*v1beta1.PodDisruptionBudget, error) + PodDisruptionBudgetNamespaceListerExpansion +} + +// podDisruptionBudgetNamespaceLister implements the PodDisruptionBudgetNamespaceLister +// interface. +type podDisruptionBudgetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodDisruptionBudgets in the indexer for a given namespace. +func (s podDisruptionBudgetNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.PodDisruptionBudget, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.PodDisruptionBudget)) + }) + return ret, err +} + +// Get retrieves the PodDisruptionBudget from the indexer for a given namespace and name. 
+func (s podDisruptionBudgetNamespaceLister) Get(name string) (*v1beta1.PodDisruptionBudget, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("poddisruptionbudget"), name) + } + return obj.(*v1beta1.PodDisruptionBudget), nil +} diff --git a/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go new file mode 100644 index 00000000..c0ab9d3e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/policy/v1beta1/poddisruptionbudget_expansion.go @@ -0,0 +1,74 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + "github.com/golang/glog" + "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" +) + +// PodDisruptionBudgetListerExpansion allows custom methods to be added to +// PodDisruptionBudgetLister. +type PodDisruptionBudgetListerExpansion interface { + GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) +} + +// PodDisruptionBudgetNamespaceListerExpansion allows custom methods to be added to +// PodDisruptionBudgetNamespaceLister. +type PodDisruptionBudgetNamespaceListerExpansion interface{} + +// GetPodPodDisruptionBudgets returns a list of PodDisruptionBudgets matching a pod. Returns an error only if no matching PodDisruptionBudgets are found. +func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*policy.PodDisruptionBudget, error) { + var selector labels.Selector + + if len(pod.Labels) == 0 { + return nil, fmt.Errorf("no PodDisruptionBudgets found for pod %v because it has no labels", pod.Name) + } + + list, err := s.PodDisruptionBudgets(pod.Namespace).List(labels.Everything()) + if err != nil { + return nil, err + } + + var pdbList []*policy.PodDisruptionBudget + for i := range list { + pdb := list[i] + selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector) + if err != nil { + glog.Warningf("invalid selector: %v", err) + // TODO(mml): add an event to the PDB + continue + } + + // If a PDB with a nil or empty selector creeps in, it should match nothing, not everything. + if selector.Empty() || !selector.Matches(labels.Set(pod.Labels)) { + continue + } + pdbList = append(pdbList, pdb) + } + + if len(pdbList) == 0 { + return nil, fmt.Errorf("could not find PodDisruptionBudget for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels) + } + + return pdbList, nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go new file mode 100644 index 00000000..5dc9a225 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrole.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleLister helps list ClusterRoles. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + List(selector labels.Selector) (ret []*v1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + Get(name string) (*v1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + indexer cache.Indexer +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{indexer: indexer} +} + +// List lists all ClusterRoles in the indexer. +func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1.ClusterRole, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterRole)) + }) + return ret, err +} + +// Get retrieves the ClusterRole from the index for a given name. +func (s *clusterRoleLister) Get(name string) (*v1.ClusterRole, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterrole"), name) + } + return obj.(*v1.ClusterRole), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go new file mode 100644 index 00000000..bb3186a0 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/clusterrolebinding.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. 
+ Get(name string) (*v1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + indexer cache.Indexer +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{indexer: indexer} +} + +// List lists all ClusterRoleBindings in the indexer. +func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1.ClusterRoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.ClusterRoleBinding)) + }) + return ret, err +} + +// Get retrieves the ClusterRoleBinding from the index for a given name. +func (s *clusterRoleBindingLister) Get(name string) (*v1.ClusterRoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("clusterrolebinding"), name) + } + return obj.(*v1.ClusterRoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go new file mode 100644 index 00000000..4d9872d3 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. +type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1/role.go new file mode 100644 index 00000000..8d7625db --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/role.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +type RoleLister interface { + // List lists all Roles in the indexer. + List(selector labels.Selector) (ret []*v1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + indexer cache.Indexer +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{indexer: indexer} +} + +// List lists all Roles in the indexer. +func (s *roleLister) List(selector labels.Selector) (ret []*v1.Role, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Role)) + }) + return ret, err +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleNamespaceLister helps list and get Roles. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + Get(name string) (*v1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. +type roleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Roles in the indexer for a given namespace. +func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1.Role, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.Role)) + }) + return ret, err +} + +// Get retrieves the Role from the indexer for a given namespace and name. +func (s roleNamespaceLister) Get(name string) (*v1.Role, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("role"), name) + } + return obj.(*v1.Role), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go new file mode 100644 index 00000000..b8209d85 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1/rolebinding.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + indexer cache.Indexer +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{indexer: indexer} +} + +// List lists all RoleBindings in the indexer. +func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.RoleBinding)) + }) + return ret, err +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + Get(name string) (*v1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RoleBindings in the indexer for a given namespace. +func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.RoleBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.RoleBinding)) + }) + return ret, err +} + +// Get retrieves the RoleBinding from the indexer for a given namespace and name. +func (s roleBindingNamespaceLister) Get(name string) (*v1.RoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("rolebinding"), name) + } + return obj.(*v1.RoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go new file mode 100644 index 00000000..9e20a6d1 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleLister helps list ClusterRoles. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + Get(name string) (*v1alpha1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + indexer cache.Indexer +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{indexer: indexer} +} + +// List lists all ClusterRoles in the indexer. +func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRole, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterRole)) + }) + return ret, err +} + +// Get retrieves the ClusterRole from the index for a given name. +func (s *clusterRoleLister) Get(name string) (*v1alpha1.ClusterRole, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clusterrole"), name) + } + return obj.(*v1alpha1.ClusterRole), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 00000000..155666ab --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. 
+ Get(name string) (*v1alpha1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + indexer cache.Indexer +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{indexer: indexer} +} + +// List lists all ClusterRoleBindings in the indexer. +func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterRoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterRoleBinding)) + }) + return ret, err +} + +// Get retrieves the ClusterRoleBinding from the index for a given name. +func (s *clusterRoleBindingLister) Get(name string) (*v1alpha1.ClusterRoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clusterrolebinding"), name) + } + return obj.(*v1alpha1.ClusterRoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..0ab4fb99 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. +type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go new file mode 100644 index 00000000..72ab79c9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/role.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +type RoleLister interface { + // List lists all Roles in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + indexer cache.Indexer +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{indexer: indexer} +} + +// List lists all Roles in the indexer. +func (s *roleLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Role)) + }) + return ret, err +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleNamespaceLister helps list and get Roles. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. +type roleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Roles in the indexer for a given namespace. +func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.Role, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.Role)) + }) + return ret, err +} + +// Get retrieves the Role from the indexer for a given namespace and name. +func (s roleNamespaceLister) Get(name string) (*v1alpha1.Role, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("role"), name) + } + return obj.(*v1alpha1.Role), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go new file mode 100644 index 00000000..7f9cfd45 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + indexer cache.Indexer +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{indexer: indexer} +} + +// List lists all RoleBindings in the indexer. +func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RoleBinding)) + }) + return ret, err +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RoleBindings in the indexer for a given namespace. +func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.RoleBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.RoleBinding)) + }) + return ret, err +} + +// Get retrieves the RoleBinding from the indexer for a given namespace and name. +func (s roleBindingNamespaceLister) Get(name string) (*v1alpha1.RoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("rolebinding"), name) + } + return obj.(*v1alpha1.RoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go new file mode 100644 index 00000000..65ec3eb9 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrole.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleLister helps list ClusterRoles. +type ClusterRoleLister interface { + // List lists all ClusterRoles in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) + // Get retrieves the ClusterRole from the index for a given name. + Get(name string) (*v1beta1.ClusterRole, error) + ClusterRoleListerExpansion +} + +// clusterRoleLister implements the ClusterRoleLister interface. +type clusterRoleLister struct { + indexer cache.Indexer +} + +// NewClusterRoleLister returns a new ClusterRoleLister. +func NewClusterRoleLister(indexer cache.Indexer) ClusterRoleLister { + return &clusterRoleLister{indexer: indexer} +} + +// List lists all ClusterRoles in the indexer. +func (s *clusterRoleLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRole, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ClusterRole)) + }) + return ret, err +} + +// Get retrieves the ClusterRole from the index for a given name. +func (s *clusterRoleLister) Get(name string) (*v1beta1.ClusterRole, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("clusterrole"), name) + } + return obj.(*v1beta1.ClusterRole), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 00000000..146f2d7f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterRoleBindingLister helps list ClusterRoleBindings. +type ClusterRoleBindingLister interface { + // List lists all ClusterRoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) + // Get retrieves the ClusterRoleBinding from the index for a given name. 
+ Get(name string) (*v1beta1.ClusterRoleBinding, error) + ClusterRoleBindingListerExpansion +} + +// clusterRoleBindingLister implements the ClusterRoleBindingLister interface. +type clusterRoleBindingLister struct { + indexer cache.Indexer +} + +// NewClusterRoleBindingLister returns a new ClusterRoleBindingLister. +func NewClusterRoleBindingLister(indexer cache.Indexer) ClusterRoleBindingLister { + return &clusterRoleBindingLister{indexer: indexer} +} + +// List lists all ClusterRoleBindings in the indexer. +func (s *clusterRoleBindingLister) List(selector labels.Selector) (ret []*v1beta1.ClusterRoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.ClusterRoleBinding)) + }) + return ret, err +} + +// Get retrieves the ClusterRoleBinding from the index for a given name. +func (s *clusterRoleBindingLister) Get(name string) (*v1beta1.ClusterRoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("clusterrolebinding"), name) + } + return obj.(*v1beta1.ClusterRoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go new file mode 100644 index 00000000..b6eeae83 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/expansion_generated.go @@ -0,0 +1,43 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// ClusterRoleListerExpansion allows custom methods to be added to +// ClusterRoleLister. +type ClusterRoleListerExpansion interface{} + +// ClusterRoleBindingListerExpansion allows custom methods to be added to +// ClusterRoleBindingLister. +type ClusterRoleBindingListerExpansion interface{} + +// RoleListerExpansion allows custom methods to be added to +// RoleLister. +type RoleListerExpansion interface{} + +// RoleNamespaceListerExpansion allows custom methods to be added to +// RoleNamespaceLister. +type RoleNamespaceListerExpansion interface{} + +// RoleBindingListerExpansion allows custom methods to be added to +// RoleBindingLister. +type RoleBindingListerExpansion interface{} + +// RoleBindingNamespaceListerExpansion allows custom methods to be added to +// RoleBindingNamespaceLister. +type RoleBindingNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go new file mode 100644 index 00000000..b795e98b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/role.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleLister helps list Roles. +type RoleLister interface { + // List lists all Roles in the indexer. + List(selector labels.Selector) (ret []*v1beta1.Role, err error) + // Roles returns an object that can list and get Roles. + Roles(namespace string) RoleNamespaceLister + RoleListerExpansion +} + +// roleLister implements the RoleLister interface. +type roleLister struct { + indexer cache.Indexer +} + +// NewRoleLister returns a new RoleLister. +func NewRoleLister(indexer cache.Indexer) RoleLister { + return &roleLister{indexer: indexer} +} + +// List lists all Roles in the indexer. +func (s *roleLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Role)) + }) + return ret, err +} + +// Roles returns an object that can list and get Roles. +func (s *roleLister) Roles(namespace string) RoleNamespaceLister { + return roleNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleNamespaceLister helps list and get Roles. +type RoleNamespaceLister interface { + // List lists all Roles in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.Role, err error) + // Get retrieves the Role from the indexer for a given namespace and name. + Get(name string) (*v1beta1.Role, error) + RoleNamespaceListerExpansion +} + +// roleNamespaceLister implements the RoleNamespaceLister +// interface. +type roleNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all Roles in the indexer for a given namespace. +func (s roleNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.Role, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.Role)) + }) + return ret, err +} + +// Get retrieves the Role from the indexer for a given namespace and name. +func (s roleNamespaceLister) Get(name string) (*v1beta1.Role, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("role"), name) + } + return obj.(*v1beta1.Role), nil +} diff --git a/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go new file mode 100644 index 00000000..d27ea2eb --- /dev/null +++ b/vendor/k8s.io/client-go/listers/rbac/v1beta1/rolebinding.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// RoleBindingLister helps list RoleBindings. +type RoleBindingLister interface { + // List lists all RoleBindings in the indexer. + List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + // RoleBindings returns an object that can list and get RoleBindings. + RoleBindings(namespace string) RoleBindingNamespaceLister + RoleBindingListerExpansion +} + +// roleBindingLister implements the RoleBindingLister interface. +type roleBindingLister struct { + indexer cache.Indexer +} + +// NewRoleBindingLister returns a new RoleBindingLister. +func NewRoleBindingLister(indexer cache.Indexer) RoleBindingLister { + return &roleBindingLister{indexer: indexer} +} + +// List lists all RoleBindings in the indexer. +func (s *roleBindingLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.RoleBinding)) + }) + return ret, err +} + +// RoleBindings returns an object that can list and get RoleBindings. +func (s *roleBindingLister) RoleBindings(namespace string) RoleBindingNamespaceLister { + return roleBindingNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// RoleBindingNamespaceLister helps list and get RoleBindings. +type RoleBindingNamespaceLister interface { + // List lists all RoleBindings in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) + // Get retrieves the RoleBinding from the indexer for a given namespace and name. + Get(name string) (*v1beta1.RoleBinding, error) + RoleBindingNamespaceListerExpansion +} + +// roleBindingNamespaceLister implements the RoleBindingNamespaceLister +// interface. +type roleBindingNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all RoleBindings in the indexer for a given namespace. +func (s roleBindingNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.RoleBinding, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.RoleBinding)) + }) + return ret, err +} + +// Get retrieves the RoleBinding from the indexer for a given namespace and name. 
+func (s roleBindingNamespaceLister) Get(name string) (*v1beta1.RoleBinding, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("rolebinding"), name) + } + return obj.(*v1beta1.RoleBinding), nil +} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..8a644c80 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// PriorityClassListerExpansion allows custom methods to be added to +// PriorityClassLister. +type PriorityClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go new file mode 100644 index 00000000..9ed04fd2 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/scheduling/v1alpha1/priorityclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PriorityClassLister helps list PriorityClasses. +type PriorityClassLister interface { + // List lists all PriorityClasses in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) + // Get retrieves the PriorityClass from the index for a given name. + Get(name string) (*v1alpha1.PriorityClass, error) + PriorityClassListerExpansion +} + +// priorityClassLister implements the PriorityClassLister interface. +type priorityClassLister struct { + indexer cache.Indexer +} + +// NewPriorityClassLister returns a new PriorityClassLister. +func NewPriorityClassLister(indexer cache.Indexer) PriorityClassLister { + return &priorityClassLister{indexer: indexer} +} + +// List lists all PriorityClasses in the indexer. 
+func (s *priorityClassLister) List(selector labels.Selector) (ret []*v1alpha1.PriorityClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PriorityClass)) + }) + return ret, err +} + +// Get retrieves the PriorityClass from the index for a given name. +func (s *priorityClassLister) Get(name string) (*v1alpha1.PriorityClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("priorityclass"), name) + } + return obj.(*v1alpha1.PriorityClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..7a5ce38e --- /dev/null +++ b/vendor/k8s.io/client-go/listers/settings/v1alpha1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// PodPresetListerExpansion allows custom methods to be added to +// PodPresetLister. +type PodPresetListerExpansion interface{} + +// PodPresetNamespaceListerExpansion allows custom methods to be added to +// PodPresetNamespaceLister. +type PodPresetNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go new file mode 100644 index 00000000..18f62249 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/settings/v1alpha1/podpreset.go @@ -0,0 +1,94 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/settings/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// PodPresetLister helps list PodPresets. +type PodPresetLister interface { + // List lists all PodPresets in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) + // PodPresets returns an object that can list and get PodPresets. + PodPresets(namespace string) PodPresetNamespaceLister + PodPresetListerExpansion +} + +// podPresetLister implements the PodPresetLister interface. +type podPresetLister struct { + indexer cache.Indexer +} + +// NewPodPresetLister returns a new PodPresetLister. 
+func NewPodPresetLister(indexer cache.Indexer) PodPresetLister { + return &podPresetLister{indexer: indexer} +} + +// List lists all PodPresets in the indexer. +func (s *podPresetLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodPreset)) + }) + return ret, err +} + +// PodPresets returns an object that can list and get PodPresets. +func (s *podPresetLister) PodPresets(namespace string) PodPresetNamespaceLister { + return podPresetNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// PodPresetNamespaceLister helps list and get PodPresets. +type PodPresetNamespaceLister interface { + // List lists all PodPresets in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) + // Get retrieves the PodPreset from the indexer for a given namespace and name. + Get(name string) (*v1alpha1.PodPreset, error) + PodPresetNamespaceListerExpansion +} + +// podPresetNamespaceLister implements the PodPresetNamespaceLister +// interface. +type podPresetNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all PodPresets in the indexer for a given namespace. +func (s podPresetNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.PodPreset, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.PodPreset)) + }) + return ret, err +} + +// Get retrieves the PodPreset from the indexer for a given namespace and name. +func (s podPresetNamespaceLister) Get(name string) (*v1alpha1.PodPreset, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("podpreset"), name) + } + return obj.(*v1alpha1.PodPreset), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go new file mode 100644 index 00000000..2353b59d --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +// StorageClassListerExpansion allows custom methods to be added to +// StorageClassLister. +type StorageClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go new file mode 100644 index 00000000..7c37321f --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1/storageclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StorageClassLister helps list StorageClasses. +type StorageClassLister interface { + // List lists all StorageClasses in the indexer. + List(selector labels.Selector) (ret []*v1.StorageClass, err error) + // Get retrieves the StorageClass from the index for a given name. + Get(name string) (*v1.StorageClass, error) + StorageClassListerExpansion +} + +// storageClassLister implements the StorageClassLister interface. +type storageClassLister struct { + indexer cache.Indexer +} + +// NewStorageClassLister returns a new StorageClassLister. +func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { + return &storageClassLister{indexer: indexer} +} + +// List lists all StorageClasses in the indexer. +func (s *storageClassLister) List(selector labels.Selector) (ret []*v1.StorageClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.StorageClass)) + }) + return ret, err +} + +// Get retrieves the StorageClass from the index for a given name. +func (s *storageClassLister) Get(name string) (*v1.StorageClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("storageclass"), name) + } + return obj.(*v1.StorageClass), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go new file mode 100644 index 00000000..63abe94a --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +// VolumeAttachmentListerExpansion allows custom methods to be added to +// VolumeAttachmentLister. +type VolumeAttachmentListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go new file mode 100644 index 00000000..02004629 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// VolumeAttachmentLister helps list VolumeAttachments. +type VolumeAttachmentLister interface { + // List lists all VolumeAttachments in the indexer. + List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) + // Get retrieves the VolumeAttachment from the index for a given name. + Get(name string) (*v1alpha1.VolumeAttachment, error) + VolumeAttachmentListerExpansion +} + +// volumeAttachmentLister implements the VolumeAttachmentLister interface. +type volumeAttachmentLister struct { + indexer cache.Indexer +} + +// NewVolumeAttachmentLister returns a new VolumeAttachmentLister. +func NewVolumeAttachmentLister(indexer cache.Indexer) VolumeAttachmentLister { + return &volumeAttachmentLister{indexer: indexer} +} + +// List lists all VolumeAttachments in the indexer. +func (s *volumeAttachmentLister) List(selector labels.Selector) (ret []*v1alpha1.VolumeAttachment, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.VolumeAttachment)) + }) + return ret, err +} + +// Get retrieves the VolumeAttachment from the index for a given name. +func (s *volumeAttachmentLister) Get(name string) (*v1alpha1.VolumeAttachment, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("volumeattachment"), name) + } + return obj.(*v1alpha1.VolumeAttachment), nil +} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go new file mode 100644 index 00000000..84e0f9c4 --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +// StorageClassListerExpansion allows custom methods to be added to +// StorageClassLister. +type StorageClassListerExpansion interface{} diff --git a/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go new file mode 100644 index 00000000..9253319b --- /dev/null +++ b/vendor/k8s.io/client-go/listers/storage/v1beta1/storageclass.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was automatically generated by lister-gen + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// StorageClassLister helps list StorageClasses. +type StorageClassLister interface { + // List lists all StorageClasses in the indexer. + List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) + // Get retrieves the StorageClass from the index for a given name. + Get(name string) (*v1beta1.StorageClass, error) + StorageClassListerExpansion +} + +// storageClassLister implements the StorageClassLister interface. +type storageClassLister struct { + indexer cache.Indexer +} + +// NewStorageClassLister returns a new StorageClassLister. +func NewStorageClassLister(indexer cache.Indexer) StorageClassLister { + return &storageClassLister{indexer: indexer} +} + +// List lists all StorageClasses in the indexer. +func (s *storageClassLister) List(selector labels.Selector) (ret []*v1beta1.StorageClass, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1beta1.StorageClass)) + }) + return ret, err +} + +// Get retrieves the StorageClass from the index for a given name. +func (s *storageClassLister) Get(name string) (*v1beta1.StorageClass, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1beta1.Resource("storageclass"), name) + } + return obj.(*v1beta1.StorageClass), nil +} diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go index c377705f..7ab0ed3a 100644 --- a/vendor/k8s.io/client-go/pkg/version/base.go +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -51,9 +51,13 @@ var ( // semantic version is a git hash, but the version itself is no // longer the direct output of "git describe", but a slight // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. 
See also https://git-scm.com/docs/gitattributes gitVersion string = "v0.0.0-master+$Format:%h$" - gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) - gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "" // state of git tree, either "clean" or "dirty" buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') ) diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index 97ec03e0..aac126cc 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -179,6 +179,24 @@ func (r *Request) Resource(resource string) *Request { return r } +// BackOff sets the request's backoff manager to the one specified, +// or defaults to the stub implementation if nil is provided +func (r *Request) BackOff(manager BackoffManager) *Request { + if manager == nil { + r.backoffMgr = &NoBackoff{} + return r + } + + r.backoffMgr = manager + return r +} + +// Throttle receives a rate-limiter and sets or replaces an existing request limiter +func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request { + r.throttle = limiter + return r +} + // SubResource sets a sub-resource path which can be multiple segments segment after the resource // name but before the suffix. func (r *Request) SubResource(subresources ...string) *Request { @@ -827,6 +845,8 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { + case bool(glog.V(10)): + return body case bool(glog.V(9)): max = 10240 case bool(glog.V(8)): diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go index 1632e1ef..59050fc4 100644 --- a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -20,23 +20,6 @@ limitations under the License. package rest -import ( - conversion "k8s.io/apimachinery/pkg/conversion" - reflect "reflect" -) - -// GetGeneratedDeepCopyFuncs returns the generated funcs, since we aren't registering them. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func GetGeneratedDeepCopyFuncs() []conversion.GeneratedDeepCopyFunc { - return []conversion.GeneratedDeepCopyFunc{ - {Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TLSClientConfig).DeepCopyInto(out.(*TLSClientConfig)) - return nil - }, InType: reflect.TypeOf(&TLSClientConfig{})}, - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { *out = *in diff --git a/vendor/k8s.io/client-go/tools/cache/reflector.go b/vendor/k8s.io/client-go/tools/cache/reflector.go index 4967f98d..a97b5f98 100644 --- a/vendor/k8s.io/client-go/tools/cache/reflector.go +++ b/vendor/k8s.io/client-go/tools/cache/reflector.go @@ -109,7 +109,7 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, r := &Reflector{ name: name, // we need this to be unique per process (some names are still the same)but obvious who it belongs to - metrics: newReflectorMetrics(makeValidPromethusMetricName(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), + metrics: newReflectorMetrics(makeValidPromethusMetricLabel(fmt.Sprintf("reflector_"+name+"_%d", reflectorSuffix))), listerWatcher: lw, store: store, expectedType: reflect.TypeOf(expectedType), @@ -120,9 +120,9 @@ func NewNamedReflector(name string, lw ListerWatcher, expectedType interface{}, return r } -func makeValidPromethusMetricName(in string) string { +func makeValidPromethusMetricLabel(in string) string { // this isn't perfect, but it removes our common characters - return strings.NewReplacer("/", "_", ".", "_", "-", "_").Replace(in) + return strings.NewReplacer("/", "_", ".", "_", "-", "_", ":", "_").Replace(in) } // internalPackages are packages that ignored when creating a default reflector name. These packages are in the common @@ -295,6 +295,13 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error { }() for { + // give the stopCh a chance to stop the loop, even in case of continue statements further down on errors + select { + case <-stopCh: + return nil + default: + } + timemoutseconds := int64(minWatchTimeout.Seconds() * (rand.Float64() + 1.0)) options = metav1.ListOptions{ ResourceVersion: resourceVersion, diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 93a891e9..0a081871 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -14,5 +14,5 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go index b787f0dd..51668f05 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -21,48 +21,9 @@ limitations under the License. package api import ( - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. 
-func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthInfo).DeepCopyInto(out.(*AuthInfo)) - return nil - }, InType: reflect.TypeOf(&AuthInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AuthProviderConfig).DeepCopyInto(out.(*AuthProviderConfig)) - return nil - }, InType: reflect.TypeOf(&AuthProviderConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Cluster).DeepCopyInto(out.(*Cluster)) - return nil - }, InType: reflect.TypeOf(&Cluster{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Config).DeepCopyInto(out.(*Config)) - return nil - }, InType: reflect.TypeOf(&Config{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Context).DeepCopyInto(out.(*Context)) - return nil - }, InType: reflect.TypeOf(&Context{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preferences).DeepCopyInto(out.(*Preferences)) - return nil - }, InType: reflect.TypeOf(&Preferences{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { *out = *in diff --git a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go index bcbe9fcd..6b69f366 100644 --- a/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go +++ b/vendor/k8s.io/client-go/tools/remotecommand/remotecommand.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/remotecommand" restclient "k8s.io/client-go/rest" - "k8s.io/client-go/transport" spdy "k8s.io/client-go/transport/spdy" ) @@ -72,8 +71,18 @@ type streamExecutor struct { // NewSPDYExecutor connects to the provided server and upgrades the connection to // multiplexed bidirectional streams. func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Executor, error) { + wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) + if err != nil { + return nil, err + } + return NewSPDYExecutorForTransports(wrapper, upgradeRoundTripper, method, url) +} + +// NewSPDYExecutorForTransports connects to the provided server using the given transport, +// upgrades the response using the given upgrader to multiplexed bidirectional streams. +func NewSPDYExecutorForTransports(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL) (Executor, error) { return NewSPDYExecutorForProtocols( - config, method, url, + transport, upgrader, method, url, remotecommand.StreamProtocolV4Name, remotecommand.StreamProtocolV3Name, remotecommand.StreamProtocolV2Name, @@ -83,16 +92,11 @@ func NewSPDYExecutor(config *restclient.Config, method string, url *url.URL) (Ex // NewSPDYExecutorForProtocols connects to the provided server and upgrades the connection to // multiplexed bidirectional streams using only the provided protocols. Exposed for testing, most -// callers should use NewSPDYExecutor. 
-func NewSPDYExecutorForProtocols(config *restclient.Config, method string, url *url.URL, protocols ...string) (Executor, error) { - wrapper, upgradeRoundTripper, err := spdy.RoundTripperFor(config) - if err != nil { - return nil, err - } - wrapper = transport.DebugWrappers(wrapper) +// callers should use NewSPDYExecutor or NewSPDYExecutorForTransports. +func NewSPDYExecutorForProtocols(transport http.RoundTripper, upgrader spdy.Upgrader, method string, url *url.URL, protocols ...string) (Executor, error) { return &streamExecutor{ - upgrader: upgradeRoundTripper, - transport: wrapper, + upgrader: upgrader, + transport: transport, method: method, url: url, protocols: protocols, diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go index 561c92c1..da22cdee 100644 --- a/vendor/k8s.io/client-go/transport/cache.go +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -88,5 +88,5 @@ func tlsConfigKey(c *Config) (string, error) { return "", err } // Only include the things that actually affect the tls.Config - return fmt.Sprintf("%v/%x/%x/%x", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData), nil + return fmt.Sprintf("%v/%x/%x/%x/%v", c.TLS.Insecure, c.TLS.CAData, c.TLS.CertData, c.TLS.KeyData, c.TLS.ServerName), nil } diff --git a/vendor/k8s.io/kube-openapi/README.md b/vendor/k8s.io/kube-openapi/README.md index 1b24a782..babadde1 100644 --- a/vendor/k8s.io/kube-openapi/README.md +++ b/vendor/k8s.io/kube-openapi/README.md @@ -1,14 +1,14 @@ # Kube OpenAPI -This repo is the home for Kubernetes OpenAPI discovery spec generation. The goal -is to support a subset of OpenAPI features to satisfy kubernetes use-cases but -implement that subset with little to no assumption about the structure of the -code or routes. Thus, there should be no kubernetes specific code in this repo. +This repo is the home for Kubernetes OpenAPI discovery spec generation. The goal +is to support a subset of OpenAPI features to satisfy kubernetes use-cases but +implement that subset with little to no assumption about the structure of the +code or routes. Thus, there should be no kubernetes specific code in this repo. -There are two main parts: - - A model generator that goes through .go files, find and generate model -definitions. - - The spec generator that is responsible for dynamically generate -the final OpenAPI spec using web service routes or combining other +There are two main parts: + - A model generator that goes through .go files, find and generate model +definitions. + - The spec generator that is responsible for dynamically generate +the final OpenAPI spec using web service routes or combining other OpenAPI/Json specs. diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go new file mode 100644 index 00000000..11ed8a6b --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package proto is a collection of libraries for parsing and indexing the type definitions. +// The openapi spec contains the object model definitions and extensions metadata. +package proto diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go new file mode 100644 index 00000000..5f607c76 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/document.go @@ -0,0 +1,275 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" + + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + yaml "gopkg.in/yaml.v2" +) + +func newSchemaError(path *Path, format string, a ...interface{}) error { + err := fmt.Sprintf(format, a...) + if path.Len() == 0 { + return fmt.Errorf("SchemaError: %v", err) + } + return fmt.Errorf("SchemaError(%v): %v", path, err) +} + +// VendorExtensionToMap converts openapi VendorExtension to a map. +func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} { + values := map[string]interface{}{} + + for _, na := range e { + if na.GetName() == "" || na.GetValue() == nil { + continue + } + if na.GetValue().GetYaml() == "" { + continue + } + var value interface{} + err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value) + if err != nil { + continue + } + + values[na.GetName()] = value + } + + return values +} + +// Definitions is an implementation of `Models`. It looks for +// models in an openapi Schema. +type Definitions struct { + models map[string]Schema +} + +var _ Models = &Definitions{} + +// NewOpenAPIData creates a new `Models` out of the openapi document. +func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) { + definitions := Definitions{ + models: map[string]Schema{}, + } + + // Save the list of all models first. This will allow us to + // validate that we don't have any dangling reference. + for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + definitions.models[namedSchema.GetName()] = nil + } + + // Now, parse each model. We can validate that references exists. 
+ for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() { + path := NewPath(namedSchema.GetName()) + schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + definitions.models[namedSchema.GetName()] = schema + } + + return &definitions, nil +} + +// We believe the schema is a reference, verify that and returns a new +// Schema +func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetProperties().GetAdditionalProperties()) > 0 { + return nil, newSchemaError(path, "unallowed embedded type definition") + } + if len(s.GetType().GetValue()) > 0 { + return nil, newSchemaError(path, "definition reference can't have a type") + } + + if !strings.HasPrefix(s.GetXRef(), "#/definitions/") { + return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef()) + } + reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/") + if _, ok := d.models[reference]; !ok { + return nil, newSchemaError(path, "unknown model in reference: %q", reference) + } + return &Ref{ + BaseSchema: d.parseBaseSchema(s, path), + reference: reference, + definitions: d, + }, nil +} + +func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema { + return BaseSchema{ + Description: s.GetDescription(), + Extensions: VendorExtensionToMap(s.GetVendorExtension()), + Path: *path, + } +} + +// We believe the schema is a map, verify and return a new schema +func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetAdditionalProperties().GetSchema() == nil { + return nil, newSchemaError(path, "invalid object doesn't have additional properties") + } + sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path) + if err != nil { + return nil, err + } + return &Map{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) { + var t string + if len(s.GetType().GetValue()) > 1 { + return nil, newSchemaError(path, "primitive can't have more than 1 type") + } + if len(s.GetType().GetValue()) == 1 { + t = s.GetType().GetValue()[0] + } + switch t { + case String: + case Number: + case Integer: + case Boolean: + case "": // Some models are completely empty, and can be safely ignored. 
+ // Do nothing + default: + return nil, newSchemaError(path, "Unknown primitive type: %q", t) + } + return &Primitive{ + BaseSchema: d.parseBaseSchema(s, path), + Type: t, + Format: s.GetFormat(), + }, nil +} + +func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 1 { + return nil, newSchemaError(path, "array should have exactly one type") + } + if s.GetType().GetValue()[0] != array { + return nil, newSchemaError(path, `array should have type "array"`) + } + if len(s.GetItems().GetSchema()) != 1 { + return nil, newSchemaError(path, "array should have exactly one sub-item") + } + sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path) + if err != nil { + return nil, err + } + return &Array{ + BaseSchema: d.parseBaseSchema(s, path), + SubType: sub, + }, nil +} + +func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object { + return nil, newSchemaError(path, "invalid object type") + } + if s.GetProperties() == nil { + return nil, newSchemaError(path, "object doesn't have properties") + } + + fields := map[string]Schema{} + + for _, namedSchema := range s.GetProperties().GetAdditionalProperties() { + var err error + path := path.FieldPath(namedSchema.GetName()) + fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path) + if err != nil { + return nil, err + } + } + + return &Kind{ + BaseSchema: d.parseBaseSchema(s, path), + RequiredFields: s.GetRequired(), + Fields: fields, + }, nil +} + +// ParseSchema creates a walkable Schema from an openapi schema. While +// this function is public, it doesn't leak through the interface. +func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) { + if len(s.GetType().GetValue()) == 1 { + t := s.GetType().GetValue()[0] + switch t { + case object: + return d.parseMap(s, path) + case array: + return d.parseArray(s, path) + } + + } + if s.GetXRef() != "" { + return d.parseReference(s, path) + } + if s.GetProperties() != nil { + return d.parseKind(s, path) + } + return d.parsePrimitive(s, path) +} + +// LookupModel is public through the interface of Models. It +// returns a visitable schema from the given model name. +func (d *Definitions) LookupModel(model string) Schema { + return d.models[model] +} + +func (d *Definitions) ListModels() []string { + models := []string{} + + for model := range d.models { + models = append(models, model) + } + + sort.Strings(models) + return models +} + +type Ref struct { + BaseSchema + + reference string + definitions *Definitions +} + +var _ Reference = &Ref{} + +func (r *Ref) Reference() string { + return r.reference +} + +func (r *Ref) SubSchema() Schema { + return r.definitions.models[r.reference] +} + +func (r *Ref) Accept(v SchemaVisitor) { + v.VisitReference(r) +} + +func (r *Ref) GetName() string { + return fmt.Sprintf("Reference to %q", r.reference) +} diff --git a/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go new file mode 100644 index 00000000..02ab06d6 --- /dev/null +++ b/vendor/k8s.io/kube-openapi/pkg/util/proto/openapi.go @@ -0,0 +1,251 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package proto + +import ( + "fmt" + "sort" + "strings" +) + +// Defines openapi types. +const ( + Integer = "integer" + Number = "number" + String = "string" + Boolean = "boolean" + + // These types are private as they should never leak, and are + // represented by actual structs. + array = "array" + object = "object" +) + +// Models interface describe a model provider. They can give you the +// schema for a specific model. +type Models interface { + LookupModel(string) Schema + ListModels() []string +} + +// SchemaVisitor is an interface that you need to implement if you want +// to "visit" an openapi schema. A dispatch on the Schema type will call +// the appropriate function based on its actual type: +// - Array is a list of one and only one given subtype +// - Map is a map of string to one and only one given subtype +// - Primitive can be string, integer, number and boolean. +// - Kind is an object with specific fields mapping to specific types. +// - Reference is a link to another definition. +type SchemaVisitor interface { + VisitArray(*Array) + VisitMap(*Map) + VisitPrimitive(*Primitive) + VisitKind(*Kind) + VisitReference(Reference) +} + +// Schema is the base definition of an openapi type. +type Schema interface { + // Giving a visitor here will let you visit the actual type. + Accept(SchemaVisitor) + + // Pretty print the name of the type. + GetName() string + // Describes how to access this field. + GetPath() *Path + // Describes the field. + GetDescription() string + // Returns type extensions. + GetExtensions() map[string]interface{} +} + +// Path helps us keep track of type paths +type Path struct { + parent *Path + key string +} + +func NewPath(key string) Path { + return Path{key: key} +} + +func (p *Path) Get() []string { + if p == nil { + return []string{} + } + if p.key == "" { + return p.parent.Get() + } + return append(p.parent.Get(), p.key) +} + +func (p *Path) Len() int { + return len(p.Get()) +} + +func (p *Path) String() string { + return strings.Join(p.Get(), "") +} + +// ArrayPath appends an array index and creates a new path +func (p *Path) ArrayPath(i int) Path { + return Path{ + parent: p, + key: fmt.Sprintf("[%d]", i), + } +} + +// FieldPath appends a field name and creates a new path +func (p *Path) FieldPath(field string) Path { + return Path{ + parent: p, + key: fmt.Sprintf(".%s", field), + } +} + +// BaseSchema holds data used by each types of schema. +type BaseSchema struct { + Description string + Extensions map[string]interface{} + + Path Path +} + +func (b *BaseSchema) GetDescription() string { + return b.Description +} + +func (b *BaseSchema) GetExtensions() map[string]interface{} { + return b.Extensions +} + +func (b *BaseSchema) GetPath() *Path { + return &b.Path +} + +// Array must have all its element of the same `SubType`. +type Array struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Array{} + +func (a *Array) Accept(v SchemaVisitor) { + v.VisitArray(a) +} + +func (a *Array) GetName() string { + return fmt.Sprintf("Array of %s", a.SubType.GetName()) +} + +// Kind is a complex object. 
It can have multiple different +// subtypes for each field, as defined in the `Fields` field. Mandatory +// fields are listed in `RequiredFields`. The key of the object is +// always of type `string`. +type Kind struct { + BaseSchema + + // Lists names of required fields. + RequiredFields []string + // Maps field names to types. + Fields map[string]Schema +} + +var _ Schema = &Kind{} + +func (k *Kind) Accept(v SchemaVisitor) { + v.VisitKind(k) +} + +func (k *Kind) GetName() string { + properties := []string{} + for key := range k.Fields { + properties = append(properties, key) + } + return fmt.Sprintf("Kind(%v)", properties) +} + +// IsRequired returns true if `field` is a required field for this type. +func (k *Kind) IsRequired(field string) bool { + for _, f := range k.RequiredFields { + if f == field { + return true + } + } + return false +} + +// Keys returns a alphabetically sorted list of keys. +func (k *Kind) Keys() []string { + keys := make([]string, 0) + for key := range k.Fields { + keys = append(keys, key) + } + sort.Strings(keys) + return keys +} + +// Map is an object who values must all be of the same `SubType`. +// The key of the object is always of type `string`. +type Map struct { + BaseSchema + + SubType Schema +} + +var _ Schema = &Map{} + +func (m *Map) Accept(v SchemaVisitor) { + v.VisitMap(m) +} + +func (m *Map) GetName() string { + return fmt.Sprintf("Map of %s", m.SubType.GetName()) +} + +// Primitive is a literal. There can be multiple types of primitives, +// and this subtype can be visited through the `subType` field. +type Primitive struct { + BaseSchema + + // Type of a primitive must be one of: integer, number, string, boolean. + Type string + Format string +} + +var _ Schema = &Primitive{} + +func (p *Primitive) Accept(v SchemaVisitor) { + v.VisitPrimitive(p) +} + +func (p *Primitive) GetName() string { + if p.Format == "" { + return p.Type + } + return fmt.Sprintf("%s (%s)", p.Type, p.Format) +} + +// Reference implementation depends on the type of document. +type Reference interface { + Schema + + Reference() string + SubSchema() Schema +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/service/util.go b/vendor/k8s.io/kubernetes/pkg/api/service/util.go index 242aab77..5de5f276 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/service/util.go +++ b/vendor/k8s.io/kubernetes/pkg/api/service/util.go @@ -18,10 +18,9 @@ package service import ( "fmt" - "strings" - - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" netsets "k8s.io/kubernetes/pkg/util/net/sets" + "strings" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go deleted file mode 100644 index e16af3ad..00000000 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.conversion.go +++ /dev/null @@ -1,5430 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file was autogenerated by conversion-gen. Do not edit it manually! 
- -package v1 - -import ( - v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" - api "k8s.io/kubernetes/pkg/api" - unsafe "unsafe" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(scheme *runtime.Scheme) error { - return scheme.AddGeneratedConversionFuncs( - Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource, - Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, - Convert_v1_Affinity_To_api_Affinity, - Convert_api_Affinity_To_v1_Affinity, - Convert_v1_AttachedVolume_To_api_AttachedVolume, - Convert_api_AttachedVolume_To_v1_AttachedVolume, - Convert_v1_AvoidPods_To_api_AvoidPods, - Convert_api_AvoidPods_To_v1_AvoidPods, - Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource, - Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, - Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource, - Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource, - Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource, - Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, - Convert_v1_Binding_To_api_Binding, - Convert_api_Binding_To_v1_Binding, - Convert_v1_Capabilities_To_api_Capabilities, - Convert_api_Capabilities_To_v1_Capabilities, - Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource, - Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource, - Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource, - Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource, - Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource, - Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource, - Convert_v1_ClientIPConfig_To_api_ClientIPConfig, - Convert_api_ClientIPConfig_To_v1_ClientIPConfig, - Convert_v1_ComponentCondition_To_api_ComponentCondition, - Convert_api_ComponentCondition_To_v1_ComponentCondition, - Convert_v1_ComponentStatus_To_api_ComponentStatus, - Convert_api_ComponentStatus_To_v1_ComponentStatus, - Convert_v1_ComponentStatusList_To_api_ComponentStatusList, - Convert_api_ComponentStatusList_To_v1_ComponentStatusList, - Convert_v1_ConfigMap_To_api_ConfigMap, - Convert_api_ConfigMap_To_v1_ConfigMap, - Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource, - Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, - Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector, - Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, - Convert_v1_ConfigMapList_To_api_ConfigMapList, - Convert_api_ConfigMapList_To_v1_ConfigMapList, - Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection, - Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection, - Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource, - Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, - Convert_v1_Container_To_api_Container, - Convert_api_Container_To_v1_Container, - Convert_v1_ContainerImage_To_api_ContainerImage, - Convert_api_ContainerImage_To_v1_ContainerImage, - Convert_v1_ContainerPort_To_api_ContainerPort, - Convert_api_ContainerPort_To_v1_ContainerPort, - Convert_v1_ContainerState_To_api_ContainerState, - 
Convert_api_ContainerState_To_v1_ContainerState, - Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning, - Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning, - Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated, - Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated, - Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting, - Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting, - Convert_v1_ContainerStatus_To_api_ContainerStatus, - Convert_api_ContainerStatus_To_v1_ContainerStatus, - Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint, - Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint, - Convert_v1_DeleteOptions_To_api_DeleteOptions, - Convert_api_DeleteOptions_To_v1_DeleteOptions, - Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection, - Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection, - Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile, - Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, - Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource, - Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, - Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource, - Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, - Convert_v1_EndpointAddress_To_api_EndpointAddress, - Convert_api_EndpointAddress_To_v1_EndpointAddress, - Convert_v1_EndpointPort_To_api_EndpointPort, - Convert_api_EndpointPort_To_v1_EndpointPort, - Convert_v1_EndpointSubset_To_api_EndpointSubset, - Convert_api_EndpointSubset_To_v1_EndpointSubset, - Convert_v1_Endpoints_To_api_Endpoints, - Convert_api_Endpoints_To_v1_Endpoints, - Convert_v1_EndpointsList_To_api_EndpointsList, - Convert_api_EndpointsList_To_v1_EndpointsList, - Convert_v1_EnvFromSource_To_api_EnvFromSource, - Convert_api_EnvFromSource_To_v1_EnvFromSource, - Convert_v1_EnvVar_To_api_EnvVar, - Convert_api_EnvVar_To_v1_EnvVar, - Convert_v1_EnvVarSource_To_api_EnvVarSource, - Convert_api_EnvVarSource_To_v1_EnvVarSource, - Convert_v1_Event_To_api_Event, - Convert_api_Event_To_v1_Event, - Convert_v1_EventList_To_api_EventList, - Convert_api_EventList_To_v1_EventList, - Convert_v1_EventSource_To_api_EventSource, - Convert_api_EventSource_To_v1_EventSource, - Convert_v1_ExecAction_To_api_ExecAction, - Convert_api_ExecAction_To_v1_ExecAction, - Convert_v1_FCVolumeSource_To_api_FCVolumeSource, - Convert_api_FCVolumeSource_To_v1_FCVolumeSource, - Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource, - Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource, - Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource, - Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource, - Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource, - Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, - Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource, - Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, - Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource, - Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, - Convert_v1_HTTPGetAction_To_api_HTTPGetAction, - Convert_api_HTTPGetAction_To_v1_HTTPGetAction, - Convert_v1_HTTPHeader_To_api_HTTPHeader, - Convert_api_HTTPHeader_To_v1_HTTPHeader, - Convert_v1_Handler_To_api_Handler, - Convert_api_Handler_To_v1_Handler, - Convert_v1_HostAlias_To_api_HostAlias, - Convert_api_HostAlias_To_v1_HostAlias, - Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource, - 
Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource, - Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource, - Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, - Convert_v1_KeyToPath_To_api_KeyToPath, - Convert_api_KeyToPath_To_v1_KeyToPath, - Convert_v1_Lifecycle_To_api_Lifecycle, - Convert_api_Lifecycle_To_v1_Lifecycle, - Convert_v1_LimitRange_To_api_LimitRange, - Convert_api_LimitRange_To_v1_LimitRange, - Convert_v1_LimitRangeItem_To_api_LimitRangeItem, - Convert_api_LimitRangeItem_To_v1_LimitRangeItem, - Convert_v1_LimitRangeList_To_api_LimitRangeList, - Convert_api_LimitRangeList_To_v1_LimitRangeList, - Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec, - Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec, - Convert_v1_List_To_api_List, - Convert_api_List_To_v1_List, - Convert_v1_ListOptions_To_api_ListOptions, - Convert_api_ListOptions_To_v1_ListOptions, - Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress, - Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress, - Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus, - Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus, - Convert_v1_LocalObjectReference_To_api_LocalObjectReference, - Convert_api_LocalObjectReference_To_v1_LocalObjectReference, - Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource, - Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource, - Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource, - Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource, - Convert_v1_Namespace_To_api_Namespace, - Convert_api_Namespace_To_v1_Namespace, - Convert_v1_NamespaceList_To_api_NamespaceList, - Convert_api_NamespaceList_To_v1_NamespaceList, - Convert_v1_NamespaceSpec_To_api_NamespaceSpec, - Convert_api_NamespaceSpec_To_v1_NamespaceSpec, - Convert_v1_NamespaceStatus_To_api_NamespaceStatus, - Convert_api_NamespaceStatus_To_v1_NamespaceStatus, - Convert_v1_Node_To_api_Node, - Convert_api_Node_To_v1_Node, - Convert_v1_NodeAddress_To_api_NodeAddress, - Convert_api_NodeAddress_To_v1_NodeAddress, - Convert_v1_NodeAffinity_To_api_NodeAffinity, - Convert_api_NodeAffinity_To_v1_NodeAffinity, - Convert_v1_NodeCondition_To_api_NodeCondition, - Convert_api_NodeCondition_To_v1_NodeCondition, - Convert_v1_NodeConfigSource_To_api_NodeConfigSource, - Convert_api_NodeConfigSource_To_v1_NodeConfigSource, - Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints, - Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, - Convert_v1_NodeList_To_api_NodeList, - Convert_api_NodeList_To_v1_NodeList, - Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions, - Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions, - Convert_v1_NodeResources_To_api_NodeResources, - Convert_api_NodeResources_To_v1_NodeResources, - Convert_v1_NodeSelector_To_api_NodeSelector, - Convert_api_NodeSelector_To_v1_NodeSelector, - Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement, - Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, - Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm, - Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm, - Convert_v1_NodeSpec_To_api_NodeSpec, - Convert_api_NodeSpec_To_v1_NodeSpec, - Convert_v1_NodeStatus_To_api_NodeStatus, - Convert_api_NodeStatus_To_v1_NodeStatus, - Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo, - Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo, - Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector, - Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector, - Convert_v1_ObjectMeta_To_api_ObjectMeta, - Convert_api_ObjectMeta_To_v1_ObjectMeta, - 
Convert_v1_ObjectReference_To_api_ObjectReference, - Convert_api_ObjectReference_To_v1_ObjectReference, - Convert_v1_PersistentVolume_To_api_PersistentVolume, - Convert_api_PersistentVolume_To_v1_PersistentVolume, - Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim, - Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, - Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition, - Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition, - Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList, - Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, - Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec, - Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, - Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus, - Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, - Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource, - Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, - Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList, - Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList, - Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource, - Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource, - Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec, - Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, - Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus, - Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, - Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource, - Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, - Convert_v1_Pod_To_api_Pod, - Convert_api_Pod_To_v1_Pod, - Convert_v1_PodAffinity_To_api_PodAffinity, - Convert_api_PodAffinity_To_v1_PodAffinity, - Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm, - Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm, - Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity, - Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity, - Convert_v1_PodAttachOptions_To_api_PodAttachOptions, - Convert_api_PodAttachOptions_To_v1_PodAttachOptions, - Convert_v1_PodCondition_To_api_PodCondition, - Convert_api_PodCondition_To_v1_PodCondition, - Convert_v1_PodExecOptions_To_api_PodExecOptions, - Convert_api_PodExecOptions_To_v1_PodExecOptions, - Convert_v1_PodList_To_api_PodList, - Convert_api_PodList_To_v1_PodList, - Convert_v1_PodLogOptions_To_api_PodLogOptions, - Convert_api_PodLogOptions_To_v1_PodLogOptions, - Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions, - Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions, - Convert_v1_PodProxyOptions_To_api_PodProxyOptions, - Convert_api_PodProxyOptions_To_v1_PodProxyOptions, - Convert_v1_PodSecurityContext_To_api_PodSecurityContext, - Convert_api_PodSecurityContext_To_v1_PodSecurityContext, - Convert_v1_PodSignature_To_api_PodSignature, - Convert_api_PodSignature_To_v1_PodSignature, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_v1_PodStatus_To_api_PodStatus, - Convert_api_PodStatus_To_v1_PodStatus, - Convert_v1_PodStatusResult_To_api_PodStatusResult, - Convert_api_PodStatusResult_To_v1_PodStatusResult, - Convert_v1_PodTemplate_To_api_PodTemplate, - Convert_api_PodTemplate_To_v1_PodTemplate, - Convert_v1_PodTemplateList_To_api_PodTemplateList, - 
Convert_api_PodTemplateList_To_v1_PodTemplateList, - Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec, - Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec, - Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource, - Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource, - Convert_v1_Preconditions_To_api_Preconditions, - Convert_api_Preconditions_To_v1_Preconditions, - Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry, - Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, - Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm, - Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, - Convert_v1_Probe_To_api_Probe, - Convert_api_Probe_To_v1_Probe, - Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource, - Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, - Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource, - Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, - Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource, - Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource, - Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource, - Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource, - Convert_v1_RangeAllocation_To_api_RangeAllocation, - Convert_api_RangeAllocation_To_v1_RangeAllocation, - Convert_v1_ReplicationController_To_api_ReplicationController, - Convert_api_ReplicationController_To_v1_ReplicationController, - Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition, - Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, - Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList, - Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus, - Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, - Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector, - Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector, - Convert_v1_ResourceQuota_To_api_ResourceQuota, - Convert_api_ResourceQuota_To_v1_ResourceQuota, - Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList, - Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList, - Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec, - Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, - Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus, - Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, - Convert_v1_ResourceRequirements_To_api_ResourceRequirements, - Convert_api_ResourceRequirements_To_v1_ResourceRequirements, - Convert_v1_SELinuxOptions_To_api_SELinuxOptions, - Convert_api_SELinuxOptions_To_v1_SELinuxOptions, - Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource, - Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource, - Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource, - Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, - Convert_v1_Secret_To_api_Secret, - Convert_api_Secret_To_v1_Secret, - Convert_v1_SecretEnvSource_To_api_SecretEnvSource, - Convert_api_SecretEnvSource_To_v1_SecretEnvSource, - Convert_v1_SecretKeySelector_To_api_SecretKeySelector, - Convert_api_SecretKeySelector_To_v1_SecretKeySelector, - Convert_v1_SecretList_To_api_SecretList, - Convert_api_SecretList_To_v1_SecretList, - 
Convert_v1_SecretProjection_To_api_SecretProjection, - Convert_api_SecretProjection_To_v1_SecretProjection, - Convert_v1_SecretReference_To_api_SecretReference, - Convert_api_SecretReference_To_v1_SecretReference, - Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource, - Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource, - Convert_v1_SecurityContext_To_api_SecurityContext, - Convert_api_SecurityContext_To_v1_SecurityContext, - Convert_v1_SerializedReference_To_api_SerializedReference, - Convert_api_SerializedReference_To_v1_SerializedReference, - Convert_v1_Service_To_api_Service, - Convert_api_Service_To_v1_Service, - Convert_v1_ServiceAccount_To_api_ServiceAccount, - Convert_api_ServiceAccount_To_v1_ServiceAccount, - Convert_v1_ServiceAccountList_To_api_ServiceAccountList, - Convert_api_ServiceAccountList_To_v1_ServiceAccountList, - Convert_v1_ServiceList_To_api_ServiceList, - Convert_api_ServiceList_To_v1_ServiceList, - Convert_v1_ServicePort_To_api_ServicePort, - Convert_api_ServicePort_To_v1_ServicePort, - Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions, - Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_ServiceStatus_To_api_ServiceStatus, - Convert_api_ServiceStatus_To_v1_ServiceStatus, - Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig, - Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig, - Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource, - Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, - Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource, - Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, - Convert_v1_Sysctl_To_api_Sysctl, - Convert_api_Sysctl_To_v1_Sysctl, - Convert_v1_TCPSocketAction_To_api_TCPSocketAction, - Convert_api_TCPSocketAction_To_v1_TCPSocketAction, - Convert_v1_Taint_To_api_Taint, - Convert_api_Taint_To_v1_Taint, - Convert_v1_Toleration_To_api_Toleration, - Convert_api_Toleration_To_v1_Toleration, - Convert_v1_Volume_To_api_Volume, - Convert_api_Volume_To_v1_Volume, - Convert_v1_VolumeMount_To_api_VolumeMount, - Convert_api_VolumeMount_To_v1_VolumeMount, - Convert_v1_VolumeProjection_To_api_VolumeProjection, - Convert_api_VolumeProjection_To_v1_VolumeProjection, - Convert_v1_VolumeSource_To_api_VolumeSource, - Convert_api_VolumeSource_To_v1_VolumeSource, - Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource, - Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, - Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm, - Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, - ) -} - -func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. 
-func Convert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *api.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_api_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. -func Convert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *api.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { - return autoConvert_api_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) -} - -func autoConvert_v1_Affinity_To_api_Affinity(in *v1.Affinity, out *api.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*api.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*api.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*api.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -// Convert_v1_Affinity_To_api_Affinity is an autogenerated conversion function. -func Convert_v1_Affinity_To_api_Affinity(in *v1.Affinity, out *api.Affinity, s conversion.Scope) error { - return autoConvert_v1_Affinity_To_api_Affinity(in, out, s) -} - -func autoConvert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *v1.Affinity, s conversion.Scope) error { - out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) - out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity)) - out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) - return nil -} - -// Convert_api_Affinity_To_v1_Affinity is an autogenerated conversion function. -func Convert_api_Affinity_To_v1_Affinity(in *api.Affinity, out *v1.Affinity, s conversion.Scope) error { - return autoConvert_api_Affinity_To_v1_Affinity(in, out, s) -} - -func autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in *v1.AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - out.Name = api.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -// Convert_v1_AttachedVolume_To_api_AttachedVolume is an autogenerated conversion function. -func Convert_v1_AttachedVolume_To_api_AttachedVolume(in *v1.AttachedVolume, out *api.AttachedVolume, s conversion.Scope) error { - return autoConvert_v1_AttachedVolume_To_api_AttachedVolume(in, out, s) -} - -func autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { - out.Name = v1.UniqueVolumeName(in.Name) - out.DevicePath = in.DevicePath - return nil -} - -// Convert_api_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function. 
-func Convert_api_AttachedVolume_To_v1_AttachedVolume(in *api.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { - return autoConvert_api_AttachedVolume_To_v1_AttachedVolume(in, out, s) -} - -func autoConvert_v1_AvoidPods_To_api_AvoidPods(in *v1.AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]api.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -// Convert_v1_AvoidPods_To_api_AvoidPods is an autogenerated conversion function. -func Convert_v1_AvoidPods_To_api_AvoidPods(in *v1.AvoidPods, out *api.AvoidPods, s conversion.Scope) error { - return autoConvert_v1_AvoidPods_To_api_AvoidPods(in, out, s) -} - -func autoConvert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { - out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) - return nil -} - -// Convert_api_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function. -func Convert_api_AvoidPods_To_v1_AvoidPods(in *api.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { - return autoConvert_api_AvoidPods_To_v1_AvoidPods(in, out, s) -} - -func autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*api.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - out.Kind = (*api.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) - return nil -} - -// Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource is an autogenerated conversion function. -func Convert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *api.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureDiskVolumeSource_To_api_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { - out.DiskName = in.DiskName - out.DataDiskURI = in.DataDiskURI - out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) - out.FSType = (*string)(unsafe.Pointer(in.FSType)) - out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) - out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) - return nil -} - -// Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function. -func Convert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *api.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *api.AzureFilePersistentVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) - return nil -} - -// Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *api.AzureFilePersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFilePersistentVolumeSource_To_api_AzureFilePersistentVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *api.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) - return nil -} - -// Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function. -func Convert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *api.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource is an autogenerated conversion function. -func Convert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *api.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_v1_AzureFileVolumeSource_To_api_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.ShareName = in.ShareName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. -func Convert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *api.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { - return autoConvert_api_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) -} - -func autoConvert_v1_Binding_To_api_Binding(in *v1.Binding, out *api.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Binding_To_api_Binding is an autogenerated conversion function. -func Convert_v1_Binding_To_api_Binding(in *v1.Binding, out *api.Binding, s conversion.Scope) error { - return autoConvert_v1_Binding_To_api_Binding(in, out, s) -} - -func autoConvert_api_Binding_To_v1_Binding(in *api.Binding, out *v1.Binding, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { - return err - } - return nil -} - -// Convert_api_Binding_To_v1_Binding is an autogenerated conversion function. 
-func Convert_api_Binding_To_v1_Binding(in *api.Binding, out *v1.Binding, s conversion.Scope) error { - return autoConvert_api_Binding_To_v1_Binding(in, out, s) -} - -func autoConvert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error { - out.Add = *(*[]api.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]api.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -// Convert_v1_Capabilities_To_api_Capabilities is an autogenerated conversion function. -func Convert_v1_Capabilities_To_api_Capabilities(in *v1.Capabilities, out *api.Capabilities, s conversion.Scope) error { - return autoConvert_v1_Capabilities_To_api_Capabilities(in, out, s) -} - -func autoConvert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) - out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) - return nil -} - -// Convert_api_Capabilities_To_v1_Capabilities is an autogenerated conversion function. -func Convert_api_Capabilities_To_v1_Capabilities(in *api.Capabilities, out *v1.Capabilities, s conversion.Scope) error { - return autoConvert_api_Capabilities_To_v1_Capabilities(in, out, s) -} - -func autoConvert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *api.CephFSPersistentVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *api.CephFSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSPersistentVolumeSource_To_api_CephFSPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *api.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *api.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource is an autogenerated conversion function. 
-func Convert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *api.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CephFSVolumeSource_To_api_CephFSVolumeSource(in, out, s) -} - -func autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) - out.Path = in.Path - out.User = in.User - out.SecretFile = in.SecretFile - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. -func Convert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *api.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) -} - -func autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource is an autogenerated conversion function. -func Convert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in *v1.CinderVolumeSource, out *api.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_v1_CinderVolumeSource_To_api_CinderVolumeSource(in, out, s) -} - -func autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. -func Convert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in *api.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { - return autoConvert_api_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) -} - -func autoConvert_v1_ClientIPConfig_To_api_ClientIPConfig(in *v1.ClientIPConfig, out *api.ClientIPConfig, s conversion.Scope) error { - out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_v1_ClientIPConfig_To_api_ClientIPConfig is an autogenerated conversion function. -func Convert_v1_ClientIPConfig_To_api_ClientIPConfig(in *v1.ClientIPConfig, out *api.ClientIPConfig, s conversion.Scope) error { - return autoConvert_v1_ClientIPConfig_To_api_ClientIPConfig(in, out, s) -} - -func autoConvert_api_ClientIPConfig_To_v1_ClientIPConfig(in *api.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { - out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_api_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. 
-func Convert_api_ClientIPConfig_To_v1_ClientIPConfig(in *api.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { - return autoConvert_api_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) -} - -func autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in *v1.ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - out.Type = api.ComponentConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -// Convert_v1_ComponentCondition_To_api_ComponentCondition is an autogenerated conversion function. -func Convert_v1_ComponentCondition_To_api_ComponentCondition(in *v1.ComponentCondition, out *api.ComponentCondition, s conversion.Scope) error { - return autoConvert_v1_ComponentCondition_To_api_ComponentCondition(in, out, s) -} - -func autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - out.Type = v1.ComponentConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.Message = in.Message - out.Error = in.Error - return nil -} - -// Convert_api_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. -func Convert_api_ComponentCondition_To_v1_ComponentCondition(in *api.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { - return autoConvert_api_ComponentCondition_To_v1_ComponentCondition(in, out, s) -} - -func autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in *v1.ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]api.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_ComponentStatus_To_api_ComponentStatus is an autogenerated conversion function. -func Convert_v1_ComponentStatus_To_api_ComponentStatus(in *v1.ComponentStatus, out *api.ComponentStatus, s conversion.Scope) error { - return autoConvert_v1_ComponentStatus_To_api_ComponentStatus(in, out, s) -} - -func autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. -func Convert_api_ComponentStatus_To_v1_ComponentStatus(in *api.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { - return autoConvert_api_ComponentStatus_To_v1_ComponentStatus(in, out, s) -} - -func autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in *v1.ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ComponentStatusList_To_api_ComponentStatusList is an autogenerated conversion function. 
-func Convert_v1_ComponentStatusList_To_api_ComponentStatusList(in *v1.ComponentStatusList, out *api.ComponentStatusList, s conversion.Scope) error { - return autoConvert_v1_ComponentStatusList_To_api_ComponentStatusList(in, out, s) -} - -func autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. -func Convert_api_ComponentStatusList_To_v1_ComponentStatusList(in *api.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { - return autoConvert_api_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) -} - -func autoConvert_v1_ConfigMap_To_api_ConfigMap(in *v1.ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1_ConfigMap_To_api_ConfigMap is an autogenerated conversion function. -func Convert_v1_ConfigMap_To_api_ConfigMap(in *v1.ConfigMap, out *api.ConfigMap, s conversion.Scope) error { - return autoConvert_v1_ConfigMap_To_api_ConfigMap(in, out, s) -} - -func autoConvert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_api_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. -func Convert_api_ConfigMap_To_v1_ConfigMap(in *api.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { - return autoConvert_api_ConfigMap_To_v1_ConfigMap(in, out, s) -} - -func autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource is an autogenerated conversion function. -func Convert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *api.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapEnvSource_To_api_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. 
-func Convert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *api.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) -} - -func autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *api.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_v1_ConfigMapKeySelector_To_api_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. -func Convert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *api.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { - return autoConvert_api_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) -} - -func autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in *v1.ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ConfigMapList_To_api_ConfigMapList is an autogenerated conversion function. -func Convert_v1_ConfigMapList_To_api_ConfigMapList(in *v1.ConfigMapList, out *api.ConfigMapList, s conversion.Scope) error { - return autoConvert_v1_ConfigMapList_To_api_ConfigMapList(in, out, s) -} - -func autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. -func Convert_api_ConfigMapList_To_v1_ConfigMapList(in *api.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { - return autoConvert_api_ConfigMapList_To_v1_ConfigMapList(in, out, s) -} - -func autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *v1.ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection is an autogenerated conversion function. 
-func Convert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in *v1.ConfigMapProjection, out *api.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_v1_ConfigMapProjection_To_api_ConfigMapProjection(in, out, s) -} - -func autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. -func Convert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in *api.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { - return autoConvert_api_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) -} - -func autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource is an autogenerated conversion function. -func Convert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *api.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ConfigMapVolumeSource_To_api_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. 
-func Convert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *api.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { - return autoConvert_api_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) -} - -func autoConvert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]api.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]api.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]api.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]api.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*api.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*api.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*api.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = api.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = api.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(api.SecurityContext) - if err := Convert_v1_SecurityContext_To_api_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -// Convert_v1_Container_To_api_Container is an autogenerated conversion function. -func Convert_v1_Container_To_api_Container(in *v1.Container, out *api.Container, s conversion.Scope) error { - return autoConvert_v1_Container_To_api_Container(in, out, s) -} - -func autoConvert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - out.Name = in.Name - out.Image = in.Image - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) - out.WorkingDir = in.WorkingDir - out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) - out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) - out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) - out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) - out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) - out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) - out.TerminationMessagePath = in.TerminationMessagePath - out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) - out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(v1.SecurityContext) - if err := Convert_api_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { - return err - } - } else { - out.SecurityContext = nil - } - out.Stdin = in.Stdin - out.StdinOnce = in.StdinOnce - out.TTY = in.TTY - return nil -} - -// Convert_api_Container_To_v1_Container is an autogenerated conversion function. 
-func Convert_api_Container_To_v1_Container(in *api.Container, out *v1.Container, s conversion.Scope) error { - return autoConvert_api_Container_To_v1_Container(in, out, s) -} - -func autoConvert_v1_ContainerImage_To_api_ContainerImage(in *v1.ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -// Convert_v1_ContainerImage_To_api_ContainerImage is an autogenerated conversion function. -func Convert_v1_ContainerImage_To_api_ContainerImage(in *v1.ContainerImage, out *api.ContainerImage, s conversion.Scope) error { - return autoConvert_v1_ContainerImage_To_api_ContainerImage(in, out, s) -} - -func autoConvert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { - out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) - out.SizeBytes = in.SizeBytes - return nil -} - -// Convert_api_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. -func Convert_api_ContainerImage_To_v1_ContainerImage(in *api.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { - return autoConvert_api_ContainerImage_To_v1_ContainerImage(in, out, s) -} - -func autoConvert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = api.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -// Convert_v1_ContainerPort_To_api_ContainerPort is an autogenerated conversion function. -func Convert_v1_ContainerPort_To_api_ContainerPort(in *v1.ContainerPort, out *api.ContainerPort, s conversion.Scope) error { - return autoConvert_v1_ContainerPort_To_api_ContainerPort(in, out, s) -} - -func autoConvert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - out.Name = in.Name - out.HostPort = in.HostPort - out.ContainerPort = in.ContainerPort - out.Protocol = v1.Protocol(in.Protocol) - out.HostIP = in.HostIP - return nil -} - -// Convert_api_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. -func Convert_api_ContainerPort_To_v1_ContainerPort(in *api.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { - return autoConvert_api_ContainerPort_To_v1_ContainerPort(in, out, s) -} - -func autoConvert_v1_ContainerState_To_api_ContainerState(in *v1.ContainerState, out *api.ContainerState, s conversion.Scope) error { - out.Waiting = (*api.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*api.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*api.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -// Convert_v1_ContainerState_To_api_ContainerState is an autogenerated conversion function. 
-func Convert_v1_ContainerState_To_api_ContainerState(in *v1.ContainerState, out *api.ContainerState, s conversion.Scope) error { - return autoConvert_v1_ContainerState_To_api_ContainerState(in, out, s) -} - -func autoConvert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) - out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) - out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) - return nil -} - -// Convert_api_ContainerState_To_v1_ContainerState is an autogenerated conversion function. -func Convert_api_ContainerState_To_v1_ContainerState(in *api.ContainerState, out *v1.ContainerState, s conversion.Scope) error { - return autoConvert_api_ContainerState_To_v1_ContainerState(in, out, s) -} - -func autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *v1.ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -// Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning is an autogenerated conversion function. -func Convert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in *v1.ContainerStateRunning, out *api.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_v1_ContainerStateRunning_To_api_ContainerStateRunning(in, out, s) -} - -func autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { - out.StartedAt = in.StartedAt - return nil -} - -// Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. -func Convert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in *api.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { - return autoConvert_api_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) -} - -func autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -// Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated is an autogenerated conversion function. -func Convert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *api.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_v1_ContainerStateTerminated_To_api_ContainerStateTerminated(in, out, s) -} - -func autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { - out.ExitCode = in.ExitCode - out.Signal = in.Signal - out.Reason = in.Reason - out.Message = in.Message - out.StartedAt = in.StartedAt - out.FinishedAt = in.FinishedAt - out.ContainerID = in.ContainerID - return nil -} - -// Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. 
-func Convert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *api.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { - return autoConvert_api_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) -} - -func autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting is an autogenerated conversion function. -func Convert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *api.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_v1_ContainerStateWaiting_To_api_ContainerStateWaiting(in, out, s) -} - -func autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. -func Convert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *api.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { - return autoConvert_api_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) -} - -func autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in *v1.ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_v1_ContainerState_To_api_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -// Convert_v1_ContainerStatus_To_api_ContainerStatus is an autogenerated conversion function. -func Convert_v1_ContainerStatus_To_api_ContainerStatus(in *v1.ContainerStatus, out *api.ContainerStatus, s conversion.Scope) error { - return autoConvert_v1_ContainerStatus_To_api_ContainerStatus(in, out, s) -} - -func autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { - return err - } - if err := Convert_api_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { - return err - } - out.Ready = in.Ready - out.RestartCount = in.RestartCount - out.Image = in.Image - out.ImageID = in.ImageID - out.ContainerID = in.ContainerID - return nil -} - -// Convert_api_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. -func Convert_api_ContainerStatus_To_v1_ContainerStatus(in *api.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { - return autoConvert_api_ContainerStatus_To_v1_ContainerStatus(in, out, s) -} - -func autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *v1.DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -// Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint is an autogenerated conversion function. 
-func Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in *v1.DaemonEndpoint, out *api.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_v1_DaemonEndpoint_To_api_DaemonEndpoint(in, out, s) -} - -func autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { - out.Port = in.Port - return nil -} - -// Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. -func Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in *api.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { - return autoConvert_api_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) -} - -func autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in *v1.DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*api.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*api.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -// Convert_v1_DeleteOptions_To_api_DeleteOptions is an autogenerated conversion function. -func Convert_v1_DeleteOptions_To_api_DeleteOptions(in *v1.DeleteOptions, out *api.DeleteOptions, s conversion.Scope) error { - return autoConvert_v1_DeleteOptions_To_api_DeleteOptions(in, out, s) -} - -func autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { - out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) - out.Preconditions = (*v1.Preconditions)(unsafe.Pointer(in.Preconditions)) - out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) - out.PropagationPolicy = (*v1.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) - return nil -} - -// Convert_api_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. -func Convert_api_DeleteOptions_To_v1_DeleteOptions(in *api.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { - return autoConvert_api_DeleteOptions_To_v1_DeleteOptions(in, out, s) -} - -func autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection is an autogenerated conversion function. -func Convert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *api.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIProjection_To_api_DownwardAPIProjection(in, out, s) -} - -func autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. 
-func Convert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *api.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { - return autoConvert_api_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *api.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeFile_To_api_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - out.Path = in.Path - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. -func Convert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *api.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) -} - -func autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]api.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource is an autogenerated conversion function. -func Convert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *api.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_DownwardAPIVolumeSource_To_api_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. 
-func Convert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *api.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { - return autoConvert_api_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) -} - -func autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = api.StorageMedium(in.Medium) - out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) - return nil -} - -// Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *api.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_v1_EmptyDirVolumeSource_To_api_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - out.Medium = v1.StorageMedium(in.Medium) - out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) - return nil -} - -// Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. -func Convert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *api.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { - return autoConvert_api_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) -} - -func autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in *v1.EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*api.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -// Convert_v1_EndpointAddress_To_api_EndpointAddress is an autogenerated conversion function. -func Convert_v1_EndpointAddress_To_api_EndpointAddress(in *v1.EndpointAddress, out *api.EndpointAddress, s conversion.Scope) error { - return autoConvert_v1_EndpointAddress_To_api_EndpointAddress(in, out, s) -} - -func autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) - out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) - return nil -} - -// Convert_api_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. -func Convert_api_EndpointAddress_To_v1_EndpointAddress(in *api.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { - return autoConvert_api_EndpointAddress_To_v1_EndpointAddress(in, out, s) -} - -func autoConvert_v1_EndpointPort_To_api_EndpointPort(in *v1.EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = api.Protocol(in.Protocol) - return nil -} - -// Convert_v1_EndpointPort_To_api_EndpointPort is an autogenerated conversion function. 
-func Convert_v1_EndpointPort_To_api_EndpointPort(in *v1.EndpointPort, out *api.EndpointPort, s conversion.Scope) error { - return autoConvert_v1_EndpointPort_To_api_EndpointPort(in, out, s) -} - -func autoConvert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { - out.Name = in.Name - out.Port = in.Port - out.Protocol = v1.Protocol(in.Protocol) - return nil -} - -// Convert_api_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. -func Convert_api_EndpointPort_To_v1_EndpointPort(in *api.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { - return autoConvert_api_EndpointPort_To_v1_EndpointPort(in, out, s) -} - -func autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in *v1.EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]api.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]api.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_v1_EndpointSubset_To_api_EndpointSubset is an autogenerated conversion function. -func Convert_v1_EndpointSubset_To_api_EndpointSubset(in *v1.EndpointSubset, out *api.EndpointSubset, s conversion.Scope) error { - return autoConvert_v1_EndpointSubset_To_api_EndpointSubset(in, out, s) -} - -func autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) - out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) - out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_api_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. -func Convert_api_EndpointSubset_To_v1_EndpointSubset(in *api.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { - return autoConvert_api_EndpointSubset_To_v1_EndpointSubset(in, out, s) -} - -func autoConvert_v1_Endpoints_To_api_Endpoints(in *v1.Endpoints, out *api.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]api.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -// Convert_v1_Endpoints_To_api_Endpoints is an autogenerated conversion function. -func Convert_v1_Endpoints_To_api_Endpoints(in *v1.Endpoints, out *api.Endpoints, s conversion.Scope) error { - return autoConvert_v1_Endpoints_To_api_Endpoints(in, out, s) -} - -func autoConvert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *v1.Endpoints, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) - return nil -} - -// Convert_api_Endpoints_To_v1_Endpoints is an autogenerated conversion function. -func Convert_api_Endpoints_To_v1_Endpoints(in *api.Endpoints, out *v1.Endpoints, s conversion.Scope) error { - return autoConvert_api_Endpoints_To_v1_Endpoints(in, out, s) -} - -func autoConvert_v1_EndpointsList_To_api_EndpointsList(in *v1.EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_EndpointsList_To_api_EndpointsList is an autogenerated conversion function. 
-func Convert_v1_EndpointsList_To_api_EndpointsList(in *v1.EndpointsList, out *api.EndpointsList, s conversion.Scope) error { - return autoConvert_v1_EndpointsList_To_api_EndpointsList(in, out, s) -} - -func autoConvert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. -func Convert_api_EndpointsList_To_v1_EndpointsList(in *api.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { - return autoConvert_api_EndpointsList_To_v1_EndpointsList(in, out, s) -} - -func autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in *v1.EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*api.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*api.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_EnvFromSource_To_api_EnvFromSource is an autogenerated conversion function. -func Convert_v1_EnvFromSource_To_api_EnvFromSource(in *v1.EnvFromSource, out *api.EnvFromSource, s conversion.Scope) error { - return autoConvert_v1_EnvFromSource_To_api_EnvFromSource(in, out, s) -} - -func autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { - out.Prefix = in.Prefix - out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) - out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. -func Convert_api_EnvFromSource_To_v1_EnvFromSource(in *api.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { - return autoConvert_api_EnvFromSource_To_v1_EnvFromSource(in, out, s) -} - -func autoConvert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*api.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -// Convert_v1_EnvVar_To_api_EnvVar is an autogenerated conversion function. -func Convert_v1_EnvVar_To_api_EnvVar(in *v1.EnvVar, out *api.EnvVar, s conversion.Scope) error { - return autoConvert_v1_EnvVar_To_api_EnvVar(in, out, s) -} - -func autoConvert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) - return nil -} - -// Convert_api_EnvVar_To_v1_EnvVar is an autogenerated conversion function. -func Convert_api_EnvVar_To_v1_EnvVar(in *api.EnvVar, out *v1.EnvVar, s conversion.Scope) error { - return autoConvert_api_EnvVar_To_v1_EnvVar(in, out, s) -} - -func autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*api.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*api.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*api.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*api.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -// Convert_v1_EnvVarSource_To_api_EnvVarSource is an autogenerated conversion function. 
-func Convert_v1_EnvVarSource_To_api_EnvVarSource(in *v1.EnvVarSource, out *api.EnvVarSource, s conversion.Scope) error { - return autoConvert_v1_EnvVarSource_To_api_EnvVarSource(in, out, s) -} - -func autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) - out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) - out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) - out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) - return nil -} - -// Convert_api_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. -func Convert_api_EnvVarSource_To_v1_EnvVarSource(in *api.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { - return autoConvert_api_EnvVarSource_To_v1_EnvVarSource(in, out, s) -} - -func autoConvert_v1_Event_To_api_Event(in *v1.Event, out *api.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_v1_EventSource_To_api_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -// Convert_v1_Event_To_api_Event is an autogenerated conversion function. -func Convert_v1_Event_To_api_Event(in *v1.Event, out *api.Event, s conversion.Scope) error { - return autoConvert_v1_Event_To_api_Event(in, out, s) -} - -func autoConvert_api_Event_To_v1_Event(in *api.Event, out *v1.Event, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { - return err - } - out.Reason = in.Reason - out.Message = in.Message - if err := Convert_api_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { - return err - } - out.FirstTimestamp = in.FirstTimestamp - out.LastTimestamp = in.LastTimestamp - out.Count = in.Count - out.Type = in.Type - return nil -} - -// Convert_api_Event_To_v1_Event is an autogenerated conversion function. -func Convert_api_Event_To_v1_Event(in *api.Event, out *v1.Event, s conversion.Scope) error { - return autoConvert_api_Event_To_v1_Event(in, out, s) -} - -func autoConvert_v1_EventList_To_api_EventList(in *v1.EventList, out *api.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_EventList_To_api_EventList is an autogenerated conversion function. -func Convert_v1_EventList_To_api_EventList(in *v1.EventList, out *api.EventList, s conversion.Scope) error { - return autoConvert_v1_EventList_To_api_EventList(in, out, s) -} - -func autoConvert_api_EventList_To_v1_EventList(in *api.EventList, out *v1.EventList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_EventList_To_v1_EventList is an autogenerated conversion function. 
-func Convert_api_EventList_To_v1_EventList(in *api.EventList, out *v1.EventList, s conversion.Scope) error { - return autoConvert_api_EventList_To_v1_EventList(in, out, s) -} - -func autoConvert_v1_EventSource_To_api_EventSource(in *v1.EventSource, out *api.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -// Convert_v1_EventSource_To_api_EventSource is an autogenerated conversion function. -func Convert_v1_EventSource_To_api_EventSource(in *v1.EventSource, out *api.EventSource, s conversion.Scope) error { - return autoConvert_v1_EventSource_To_api_EventSource(in, out, s) -} - -func autoConvert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *v1.EventSource, s conversion.Scope) error { - out.Component = in.Component - out.Host = in.Host - return nil -} - -// Convert_api_EventSource_To_v1_EventSource is an autogenerated conversion function. -func Convert_api_EventSource_To_v1_EventSource(in *api.EventSource, out *v1.EventSource, s conversion.Scope) error { - return autoConvert_api_EventSource_To_v1_EventSource(in, out, s) -} - -func autoConvert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_v1_ExecAction_To_api_ExecAction is an autogenerated conversion function. -func Convert_v1_ExecAction_To_api_ExecAction(in *v1.ExecAction, out *api.ExecAction, s conversion.Scope) error { - return autoConvert_v1_ExecAction_To_api_ExecAction(in, out, s) -} - -func autoConvert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_api_ExecAction_To_v1_ExecAction is an autogenerated conversion function. -func Convert_api_ExecAction_To_v1_ExecAction(in *api.ExecAction, out *v1.ExecAction, s conversion.Scope) error { - return autoConvert_api_ExecAction_To_v1_ExecAction(in, out, s) -} - -func autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) - return nil -} - -// Convert_v1_FCVolumeSource_To_api_FCVolumeSource is an autogenerated conversion function. -func Convert_v1_FCVolumeSource_To_api_FCVolumeSource(in *v1.FCVolumeSource, out *api.FCVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FCVolumeSource_To_api_FCVolumeSource(in, out, s) -} - -func autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) - out.Lun = (*int32)(unsafe.Pointer(in.Lun)) - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) - return nil -} - -// Convert_api_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. 
-func Convert_api_FCVolumeSource_To_v1_FCVolumeSource(in *api.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { - return autoConvert_api_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) -} - -func autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -// Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource is an autogenerated conversion function. -func Convert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in *v1.FlexVolumeSource, out *api.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlexVolumeSource_To_api_FlexVolumeSource(in, out, s) -} - -func autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - out.Driver = in.Driver - out.FSType = in.FSType - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) - return nil -} - -// Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. -func Convert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in *api.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) -} - -func autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -// Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource is an autogenerated conversion function. -func Convert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *api.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_v1_FlockerVolumeSource_To_api_FlockerVolumeSource(in, out, s) -} - -func autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - out.DatasetName = in.DatasetName - out.DatasetUUID = in.DatasetUUID - return nil -} - -// Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. -func Convert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *api.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { - return autoConvert_api_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) -} - -func autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *api.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GCEPersistentDiskVolumeSource_To_api_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - out.PDName = in.PDName - out.FSType = in.FSType - out.Partition = in.Partition - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *api.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -// Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *api.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GitRepoVolumeSource_To_api_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - out.Repository = in.Repository - out.Revision = in.Revision - out.Directory = in.Directory - return nil -} - -// Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. -func Convert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *api.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { - return autoConvert_api_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) -} - -func autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource is an autogenerated conversion function. -func Convert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *api.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_v1_GlusterfsVolumeSource_To_api_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - out.EndpointsName = in.EndpointsName - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. 
-func Convert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *api.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { - return autoConvert_api_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) -} - -func autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = api.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]api.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -// Convert_v1_HTTPGetAction_To_api_HTTPGetAction is an autogenerated conversion function. -func Convert_v1_HTTPGetAction_To_api_HTTPGetAction(in *v1.HTTPGetAction, out *api.HTTPGetAction, s conversion.Scope) error { - return autoConvert_v1_HTTPGetAction_To_api_HTTPGetAction(in, out, s) -} - -func autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - out.Path = in.Path - out.Port = in.Port - out.Host = in.Host - out.Scheme = v1.URIScheme(in.Scheme) - out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) - return nil -} - -// Convert_api_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. -func Convert_api_HTTPGetAction_To_v1_HTTPGetAction(in *api.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { - return autoConvert_api_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) -} - -func autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_HTTPHeader_To_api_HTTPHeader is an autogenerated conversion function. -func Convert_v1_HTTPHeader_To_api_HTTPHeader(in *v1.HTTPHeader, out *api.HTTPHeader, s conversion.Scope) error { - return autoConvert_v1_HTTPHeader_To_api_HTTPHeader(in, out, s) -} - -func autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. -func Convert_api_HTTPHeader_To_v1_HTTPHeader(in *api.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { - return autoConvert_api_HTTPHeader_To_v1_HTTPHeader(in, out, s) -} - -func autoConvert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error { - out.Exec = (*api.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*api.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*api.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -// Convert_v1_Handler_To_api_Handler is an autogenerated conversion function. -func Convert_v1_Handler_To_api_Handler(in *v1.Handler, out *api.Handler, s conversion.Scope) error { - return autoConvert_v1_Handler_To_api_Handler(in, out, s) -} - -func autoConvert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) - out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) - out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) - return nil -} - -// Convert_api_Handler_To_v1_Handler is an autogenerated conversion function. 
-func Convert_api_Handler_To_v1_Handler(in *api.Handler, out *v1.Handler, s conversion.Scope) error { - return autoConvert_api_Handler_To_v1_Handler(in, out, s) -} - -func autoConvert_v1_HostAlias_To_api_HostAlias(in *v1.HostAlias, out *api.HostAlias, s conversion.Scope) error { - out.IP = in.IP - out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) - return nil -} - -// Convert_v1_HostAlias_To_api_HostAlias is an autogenerated conversion function. -func Convert_v1_HostAlias_To_api_HostAlias(in *v1.HostAlias, out *api.HostAlias, s conversion.Scope) error { - return autoConvert_v1_HostAlias_To_api_HostAlias(in, out, s) -} - -func autoConvert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *v1.HostAlias, s conversion.Scope) error { - out.IP = in.IP - out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) - return nil -} - -// Convert_api_HostAlias_To_v1_HostAlias is an autogenerated conversion function. -func Convert_api_HostAlias_To_v1_HostAlias(in *api.HostAlias, out *v1.HostAlias, s conversion.Scope) error { - return autoConvert_api_HostAlias_To_v1_HostAlias(in, out, s) -} - -func autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - out.Type = (*api.HostPathType)(unsafe.Pointer(in.Type)) - return nil -} - -// Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource is an autogenerated conversion function. -func Convert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *api.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_v1_HostPathVolumeSource_To_api_HostPathVolumeSource(in, out, s) -} - -func autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - out.Path = in.Path - out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) - return nil -} - -// Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. -func Convert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *api.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { - return autoConvert_api_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) -} - -func autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth - out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) - return nil -} - -// Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *api.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ISCSIVolumeSource_To_api_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - out.TargetPortal = in.TargetPortal - out.IQN = in.IQN - out.Lun = in.Lun - out.ISCSIInterface = in.ISCSIInterface - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) - out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth - out.SessionCHAPAuth = in.SessionCHAPAuth - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) - return nil -} - -// Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. -func Convert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *api.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { - return autoConvert_api_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) -} - -func autoConvert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_v1_KeyToPath_To_api_KeyToPath is an autogenerated conversion function. -func Convert_v1_KeyToPath_To_api_KeyToPath(in *v1.KeyToPath, out *api.KeyToPath, s conversion.Scope) error { - return autoConvert_v1_KeyToPath_To_api_KeyToPath(in, out, s) -} - -func autoConvert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - out.Key = in.Key - out.Path = in.Path - out.Mode = (*int32)(unsafe.Pointer(in.Mode)) - return nil -} - -// Convert_api_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. -func Convert_api_KeyToPath_To_v1_KeyToPath(in *api.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { - return autoConvert_api_KeyToPath_To_v1_KeyToPath(in, out, s) -} - -func autoConvert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - out.PostStart = (*api.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*api.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -// Convert_v1_Lifecycle_To_api_Lifecycle is an autogenerated conversion function. -func Convert_v1_Lifecycle_To_api_Lifecycle(in *v1.Lifecycle, out *api.Lifecycle, s conversion.Scope) error { - return autoConvert_v1_Lifecycle_To_api_Lifecycle(in, out, s) -} - -func autoConvert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) - out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) - return nil -} - -// Convert_api_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. -func Convert_api_Lifecycle_To_v1_Lifecycle(in *api.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { - return autoConvert_api_Lifecycle_To_v1_Lifecycle(in, out, s) -} - -func autoConvert_v1_LimitRange_To_api_LimitRange(in *v1.LimitRange, out *api.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1_LimitRange_To_api_LimitRange is an autogenerated conversion function. 
-func Convert_v1_LimitRange_To_api_LimitRange(in *v1.LimitRange, out *api.LimitRange, s conversion.Scope) error { - return autoConvert_v1_LimitRange_To_api_LimitRange(in, out, s) -} - -func autoConvert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *v1.LimitRange, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_api_LimitRange_To_v1_LimitRange is an autogenerated conversion function. -func Convert_api_LimitRange_To_v1_LimitRange(in *api.LimitRange, out *v1.LimitRange, s conversion.Scope) error { - return autoConvert_api_LimitRange_To_v1_LimitRange(in, out, s) -} - -func autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in *v1.LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - out.Type = api.LimitType(in.Type) - out.Max = *(*api.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*api.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*api.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*api.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*api.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -// Convert_v1_LimitRangeItem_To_api_LimitRangeItem is an autogenerated conversion function. -func Convert_v1_LimitRangeItem_To_api_LimitRangeItem(in *v1.LimitRangeItem, out *api.LimitRangeItem, s conversion.Scope) error { - return autoConvert_v1_LimitRangeItem_To_api_LimitRangeItem(in, out, s) -} - -func autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - out.Type = v1.LimitType(in.Type) - out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) - out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) - out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) - out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) - out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) - return nil -} - -// Convert_api_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. -func Convert_api_LimitRangeItem_To_v1_LimitRangeItem(in *api.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { - return autoConvert_api_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) -} - -func autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in *v1.LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_LimitRangeList_To_api_LimitRangeList is an autogenerated conversion function. -func Convert_v1_LimitRangeList_To_api_LimitRangeList(in *v1.LimitRangeList, out *api.LimitRangeList, s conversion.Scope) error { - return autoConvert_v1_LimitRangeList_To_api_LimitRangeList(in, out, s) -} - -func autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. 
-func Convert_api_LimitRangeList_To_v1_LimitRangeList(in *api.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { - return autoConvert_api_LimitRangeList_To_v1_LimitRangeList(in, out, s) -} - -func autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *v1.LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]api.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -// Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec is an autogenerated conversion function. -func Convert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in *v1.LimitRangeSpec, out *api.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_v1_LimitRangeSpec_To_api_LimitRangeSpec(in, out, s) -} - -func autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) - return nil -} - -// Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. -func Convert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in *api.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { - return autoConvert_api_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) -} - -func autoConvert_v1_List_To_api_List(in *v1.List, out *api.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.Object, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_List_To_api_List is an autogenerated conversion function. -func Convert_v1_List_To_api_List(in *v1.List, out *api.List, s conversion.Scope) error { - return autoConvert_v1_List_To_api_List(in, out, s) -} - -func autoConvert_api_List_To_v1_List(in *api.List, out *v1.List, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]runtime.RawExtension, len(*in)) - for i := range *in { - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_List_To_v1_List is an autogenerated conversion function. -func Convert_api_List_To_v1_List(in *api.List, out *v1.List, s conversion.Scope) error { - return autoConvert_api_List_To_v1_List(in, out, s) -} - -func autoConvert_v1_ListOptions_To_api_ListOptions(in *v1.ListOptions, out *api.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_v1_ListOptions_To_api_ListOptions is an autogenerated conversion function. 
-func Convert_v1_ListOptions_To_api_ListOptions(in *v1.ListOptions, out *api.ListOptions, s conversion.Scope) error { - return autoConvert_v1_ListOptions_To_api_ListOptions(in, out, s) -} - -func autoConvert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *v1.ListOptions, s conversion.Scope) error { - if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { - return err - } - if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { - return err - } - out.IncludeUninitialized = in.IncludeUninitialized - out.Watch = in.Watch - out.ResourceVersion = in.ResourceVersion - out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) - return nil -} - -// Convert_api_ListOptions_To_v1_ListOptions is an autogenerated conversion function. -func Convert_api_ListOptions_To_v1_ListOptions(in *api.ListOptions, out *v1.ListOptions, s conversion.Scope) error { - return autoConvert_api_ListOptions_To_v1_ListOptions(in, out, s) -} - -func autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -// Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress is an autogenerated conversion function. -func Convert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *api.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerIngress_To_api_LoadBalancerIngress(in, out, s) -} - -func autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - out.IP = in.IP - out.Hostname = in.Hostname - return nil -} - -// Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. -func Convert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *api.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { - return autoConvert_api_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) -} - -func autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]api.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -// Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus is an autogenerated conversion function. -func Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *api.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(in, out, s) -} - -func autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) - return nil -} - -// Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. 
-func Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *api.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { - return autoConvert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) -} - -func autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -// Convert_v1_LocalObjectReference_To_api_LocalObjectReference is an autogenerated conversion function. -func Convert_v1_LocalObjectReference_To_api_LocalObjectReference(in *v1.LocalObjectReference, out *api.LocalObjectReference, s conversion.Scope) error { - return autoConvert_v1_LocalObjectReference_To_api_LocalObjectReference(in, out, s) -} - -func autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - out.Name = in.Name - return nil -} - -// Convert_api_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. -func Convert_api_LocalObjectReference_To_v1_LocalObjectReference(in *api.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { - return autoConvert_api_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) -} - -func autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *v1.LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource is an autogenerated conversion function. -func Convert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in *v1.LocalVolumeSource, out *api.LocalVolumeSource, s conversion.Scope) error { - return autoConvert_v1_LocalVolumeSource_To_api_LocalVolumeSource(in, out, s) -} - -func autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. -func Convert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in *api.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { - return autoConvert_api_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) -} - -func autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource is an autogenerated conversion function. -func Convert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in *v1.NFSVolumeSource, out *api.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_NFSVolumeSource_To_api_NFSVolumeSource(in, out, s) -} - -func autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - out.Server = in.Server - out.Path = in.Path - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. 
-func Convert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in *api.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { - return autoConvert_api_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) -} - -func autoConvert_v1_Namespace_To_api_Namespace(in *v1.Namespace, out *api.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NamespaceSpec_To_api_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NamespaceStatus_To_api_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Namespace_To_api_Namespace is an autogenerated conversion function. -func Convert_v1_Namespace_To_api_Namespace(in *v1.Namespace, out *api.Namespace, s conversion.Scope) error { - return autoConvert_v1_Namespace_To_api_Namespace(in, out, s) -} - -func autoConvert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *v1.Namespace, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Namespace_To_v1_Namespace is an autogenerated conversion function. -func Convert_api_Namespace_To_v1_Namespace(in *api.Namespace, out *v1.Namespace, s conversion.Scope) error { - return autoConvert_api_Namespace_To_v1_Namespace(in, out, s) -} - -func autoConvert_v1_NamespaceList_To_api_NamespaceList(in *v1.NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NamespaceList_To_api_NamespaceList is an autogenerated conversion function. -func Convert_v1_NamespaceList_To_api_NamespaceList(in *v1.NamespaceList, out *api.NamespaceList, s conversion.Scope) error { - return autoConvert_v1_NamespaceList_To_api_NamespaceList(in, out, s) -} - -func autoConvert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. -func Convert_api_NamespaceList_To_v1_NamespaceList(in *api.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { - return autoConvert_api_NamespaceList_To_v1_NamespaceList(in, out, s) -} - -func autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in *v1.NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]api.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -// Convert_v1_NamespaceSpec_To_api_NamespaceSpec is an autogenerated conversion function. -func Convert_v1_NamespaceSpec_To_api_NamespaceSpec(in *v1.NamespaceSpec, out *api.NamespaceSpec, s conversion.Scope) error { - return autoConvert_v1_NamespaceSpec_To_api_NamespaceSpec(in, out, s) -} - -func autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) - return nil -} - -// Convert_api_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. 
-func Convert_api_NamespaceSpec_To_v1_NamespaceSpec(in *api.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { - return autoConvert_api_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) -} - -func autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in *v1.NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - out.Phase = api.NamespacePhase(in.Phase) - return nil -} - -// Convert_v1_NamespaceStatus_To_api_NamespaceStatus is an autogenerated conversion function. -func Convert_v1_NamespaceStatus_To_api_NamespaceStatus(in *v1.NamespaceStatus, out *api.NamespaceStatus, s conversion.Scope) error { - return autoConvert_v1_NamespaceStatus_To_api_NamespaceStatus(in, out, s) -} - -func autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - out.Phase = v1.NamespacePhase(in.Phase) - return nil -} - -// Convert_api_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. -func Convert_api_NamespaceStatus_To_v1_NamespaceStatus(in *api.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { - return autoConvert_api_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) -} - -func autoConvert_v1_Node_To_api_Node(in *v1.Node, out *api.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_NodeSpec_To_api_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_NodeStatus_To_api_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Node_To_api_Node is an autogenerated conversion function. -func Convert_v1_Node_To_api_Node(in *v1.Node, out *api.Node, s conversion.Scope) error { - return autoConvert_v1_Node_To_api_Node(in, out, s) -} - -func autoConvert_api_Node_To_v1_Node(in *api.Node, out *v1.Node, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Node_To_v1_Node is an autogenerated conversion function. -func Convert_api_Node_To_v1_Node(in *api.Node, out *v1.Node, s conversion.Scope) error { - return autoConvert_api_Node_To_v1_Node(in, out, s) -} - -func autoConvert_v1_NodeAddress_To_api_NodeAddress(in *v1.NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - out.Type = api.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -// Convert_v1_NodeAddress_To_api_NodeAddress is an autogenerated conversion function. -func Convert_v1_NodeAddress_To_api_NodeAddress(in *v1.NodeAddress, out *api.NodeAddress, s conversion.Scope) error { - return autoConvert_v1_NodeAddress_To_api_NodeAddress(in, out, s) -} - -func autoConvert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - out.Type = v1.NodeAddressType(in.Type) - out.Address = in.Address - return nil -} - -// Convert_api_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. 
-func Convert_api_NodeAddress_To_v1_NodeAddress(in *api.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { - return autoConvert_api_NodeAddress_To_v1_NodeAddress(in, out, s) -} - -func autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in *v1.NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*api.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_NodeAffinity_To_api_NodeAffinity is an autogenerated conversion function. -func Convert_v1_NodeAffinity_To_api_NodeAffinity(in *v1.NodeAffinity, out *api.NodeAffinity, s conversion.Scope) error { - return autoConvert_v1_NodeAffinity_To_api_NodeAffinity(in, out, s) -} - -func autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. -func Convert_api_NodeAffinity_To_v1_NodeAffinity(in *api.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { - return autoConvert_api_NodeAffinity_To_v1_NodeAffinity(in, out, s) -} - -func autoConvert_v1_NodeCondition_To_api_NodeCondition(in *v1.NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - out.Type = api.NodeConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_NodeCondition_To_api_NodeCondition is an autogenerated conversion function. -func Convert_v1_NodeCondition_To_api_NodeCondition(in *v1.NodeCondition, out *api.NodeCondition, s conversion.Scope) error { - return autoConvert_v1_NodeCondition_To_api_NodeCondition(in, out, s) -} - -func autoConvert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - out.Type = v1.NodeConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastHeartbeatTime = in.LastHeartbeatTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. -func Convert_api_NodeCondition_To_v1_NodeCondition(in *api.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { - return autoConvert_api_NodeCondition_To_v1_NodeCondition(in, out, s) -} - -func autoConvert_v1_NodeConfigSource_To_api_NodeConfigSource(in *v1.NodeConfigSource, out *api.NodeConfigSource, s conversion.Scope) error { - out.ConfigMapRef = (*api.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) - return nil -} - -// Convert_v1_NodeConfigSource_To_api_NodeConfigSource is an autogenerated conversion function. 
-func Convert_v1_NodeConfigSource_To_api_NodeConfigSource(in *v1.NodeConfigSource, out *api.NodeConfigSource, s conversion.Scope) error { - return autoConvert_v1_NodeConfigSource_To_api_NodeConfigSource(in, out, s) -} - -func autoConvert_api_NodeConfigSource_To_v1_NodeConfigSource(in *api.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) - return nil -} - -// Convert_api_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. -func Convert_api_NodeConfigSource_To_v1_NodeConfigSource(in *api.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { - return autoConvert_api_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) -} - -func autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_v1_DaemonEndpoint_To_api_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *api.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { - if err := Convert_api_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { - return err - } - return nil -} - -// Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. -func Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *api.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { - return autoConvert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) -} - -func autoConvert_v1_NodeList_To_api_NodeList(in *v1.NodeList, out *api.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_NodeList_To_api_NodeList is an autogenerated conversion function. -func Convert_v1_NodeList_To_api_NodeList(in *v1.NodeList, out *api.NodeList, s conversion.Scope) error { - return autoConvert_v1_NodeList_To_api_NodeList(in, out, s) -} - -func autoConvert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *v1.NodeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_NodeList_To_v1_NodeList is an autogenerated conversion function. -func Convert_api_NodeList_To_v1_NodeList(in *api.NodeList, out *v1.NodeList, s conversion.Scope) error { - return autoConvert_api_NodeList_To_v1_NodeList(in, out, s) -} - -func autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *v1.NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions is an autogenerated conversion function. 
-func Convert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in *v1.NodeProxyOptions, out *api.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_v1_NodeProxyOptions_To_api_NodeProxyOptions(in, out, s) -} - -func autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. -func Convert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in *api.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { - return autoConvert_api_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) -} - -func autoConvert_v1_NodeResources_To_api_NodeResources(in *v1.NodeResources, out *api.NodeResources, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -// Convert_v1_NodeResources_To_api_NodeResources is an autogenerated conversion function. -func Convert_v1_NodeResources_To_api_NodeResources(in *v1.NodeResources, out *api.NodeResources, s conversion.Scope) error { - return autoConvert_v1_NodeResources_To_api_NodeResources(in, out, s) -} - -func autoConvert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *v1.NodeResources, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - return nil -} - -// Convert_api_NodeResources_To_v1_NodeResources is an autogenerated conversion function. -func Convert_api_NodeResources_To_v1_NodeResources(in *api.NodeResources, out *v1.NodeResources, s conversion.Scope) error { - return autoConvert_api_NodeResources_To_v1_NodeResources(in, out, s) -} - -func autoConvert_v1_NodeSelector_To_api_NodeSelector(in *v1.NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]api.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -// Convert_v1_NodeSelector_To_api_NodeSelector is an autogenerated conversion function. -func Convert_v1_NodeSelector_To_api_NodeSelector(in *v1.NodeSelector, out *api.NodeSelector, s conversion.Scope) error { - return autoConvert_v1_NodeSelector_To_api_NodeSelector(in, out, s) -} - -func autoConvert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) - return nil -} - -// Convert_api_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. -func Convert_api_NodeSelector_To_v1_NodeSelector(in *api.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { - return autoConvert_api_NodeSelector_To_v1_NodeSelector(in, out, s) -} - -func autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -// Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement is an autogenerated conversion function. 
-func Convert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *api.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorRequirement_To_api_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { - out.Key = in.Key - out.Operator = v1.NodeSelectorOperator(in.Operator) - out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) - return nil -} - -// Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. -func Convert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *api.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { - return autoConvert_api_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) -} - -func autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]api.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -// Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm is an autogenerated conversion function. -func Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *api.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(in, out, s) -} - -func autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) - return nil -} - -// Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. -func Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *api.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { - return autoConvert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) -} - -func autoConvert_v1_NodeSpec_To_api_NodeSpec(in *v1.NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]api.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*api.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - return nil -} - -// Convert_v1_NodeSpec_To_api_NodeSpec is an autogenerated conversion function. -func Convert_v1_NodeSpec_To_api_NodeSpec(in *v1.NodeSpec, out *api.NodeSpec, s conversion.Scope) error { - return autoConvert_v1_NodeSpec_To_api_NodeSpec(in, out, s) -} - -func autoConvert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { - out.PodCIDR = in.PodCIDR - out.ExternalID = in.ExternalID - out.ProviderID = in.ProviderID - out.Unschedulable = in.Unschedulable - out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) - out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) - return nil -} - -// Convert_api_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. 
-func Convert_api_NodeSpec_To_v1_NodeSpec(in *api.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { - return autoConvert_api_NodeSpec_To_v1_NodeSpec(in, out, s) -} - -func autoConvert_v1_NodeStatus_To_api_NodeStatus(in *v1.NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*api.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = api.NodePhase(in.Phase) - out.Conditions = *(*[]api.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]api.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_v1_NodeDaemonEndpoints_To_api_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]api.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]api.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]api.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -// Convert_v1_NodeStatus_To_api_NodeStatus is an autogenerated conversion function. -func Convert_v1_NodeStatus_To_api_NodeStatus(in *v1.NodeStatus, out *api.NodeStatus, s conversion.Scope) error { - return autoConvert_v1_NodeStatus_To_api_NodeStatus(in, out, s) -} - -func autoConvert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) - out.Phase = v1.NodePhase(in.Phase) - out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) - out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) - if err := Convert_api_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { - return err - } - if err := Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { - return err - } - out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) - out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) - out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) - return nil -} - -// Convert_api_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. -func Convert_api_NodeStatus_To_v1_NodeStatus(in *api.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { - return autoConvert_api_NodeStatus_To_v1_NodeStatus(in, out, s) -} - -func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *v1.NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -// Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo is an autogenerated conversion function. 
-func Convert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *v1.NodeSystemInfo, out *api.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in, out, s) -} - -func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { - out.MachineID = in.MachineID - out.SystemUUID = in.SystemUUID - out.BootID = in.BootID - out.KernelVersion = in.KernelVersion - out.OSImage = in.OSImage - out.ContainerRuntimeVersion = in.ContainerRuntimeVersion - out.KubeletVersion = in.KubeletVersion - out.KubeProxyVersion = in.KubeProxyVersion - out.OperatingSystem = in.OperatingSystem - out.Architecture = in.Architecture - return nil -} - -// Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. -func Convert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { - return autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) -} - -func autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector is an autogenerated conversion function. -func Convert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *api.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ObjectFieldSelector_To_api_ObjectFieldSelector(in, out, s) -} - -func autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - out.APIVersion = in.APIVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. -func Convert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *api.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { - return autoConvert_api_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) -} - -func autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -// Convert_v1_ObjectMeta_To_api_ObjectMeta is an autogenerated conversion function. 
-func Convert_v1_ObjectMeta_To_api_ObjectMeta(in *v1.ObjectMeta, out *api.ObjectMeta, s conversion.Scope) error { - return autoConvert_v1_ObjectMeta_To_api_ObjectMeta(in, out, s) -} - -func autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - out.Name = in.Name - out.GenerateName = in.GenerateName - out.Namespace = in.Namespace - out.SelfLink = in.SelfLink - out.UID = types.UID(in.UID) - out.ResourceVersion = in.ResourceVersion - out.Generation = in.Generation - out.CreationTimestamp = in.CreationTimestamp - out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) - out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) - out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) - out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) - out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) - out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) - out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) - out.ClusterName = in.ClusterName - return nil -} - -// Convert_api_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. -func Convert_api_ObjectMeta_To_v1_ObjectMeta(in *api.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { - return autoConvert_api_ObjectMeta_To_v1_ObjectMeta(in, out, s) -} - -func autoConvert_v1_ObjectReference_To_api_ObjectReference(in *v1.ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_v1_ObjectReference_To_api_ObjectReference is an autogenerated conversion function. -func Convert_v1_ObjectReference_To_api_ObjectReference(in *v1.ObjectReference, out *api.ObjectReference, s conversion.Scope) error { - return autoConvert_v1_ObjectReference_To_api_ObjectReference(in, out, s) -} - -func autoConvert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { - out.Kind = in.Kind - out.Namespace = in.Namespace - out.Name = in.Name - out.UID = types.UID(in.UID) - out.APIVersion = in.APIVersion - out.ResourceVersion = in.ResourceVersion - out.FieldPath = in.FieldPath - return nil -} - -// Convert_api_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. -func Convert_api_ObjectReference_To_v1_ObjectReference(in *api.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { - return autoConvert_api_ObjectReference_To_v1_ObjectReference(in, out, s) -} - -func autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in *v1.PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PersistentVolume_To_api_PersistentVolume is an autogenerated conversion function. 
-func Convert_v1_PersistentVolume_To_api_PersistentVolume(in *v1.PersistentVolume, out *api.PersistentVolume, s conversion.Scope) error { - return autoConvert_v1_PersistentVolume_To_api_PersistentVolume(in, out, s) -} - -func autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. -func Convert_api_PersistentVolume_To_v1_PersistentVolume(in *api.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { - return autoConvert_api_PersistentVolume_To_v1_PersistentVolume(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *api.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaim_To_api_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *api.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *api.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = api.PersistentVolumeClaimConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *api.PersistentVolumeClaimCondition, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimCondition_To_api_PersistentVolumeClaimCondition(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *api.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - out.Type = v1.PersistentVolumeClaimConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *api.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *api.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimList_To_api_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *api.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_v1_ResourceRequirements_To_api_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -// Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *api.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimSpec_To_api_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) - if err := Convert_api_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { - return err - } - out.VolumeName = in.VolumeName - out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) - return nil -} - -// Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *api.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]api.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus is an autogenerated conversion function. -func Convert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *api.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimStatus_To_api_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *api.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) -} - -func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *api.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_api_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - out.ClaimName = in.ClaimName - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. -func Convert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *api.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *v1.PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_v1_PersistentVolume_To_api_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList is an autogenerated conversion function. -func Convert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in *v1.PersistentVolumeList, out *api.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeList_To_api_PersistentVolumeList(in, out, s) -} - -func autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.PersistentVolume, len(*in)) - for i := range *in { - if err := Convert_api_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. 
-func Convert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in *api.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*api.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.AzureFile = (*api.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.Local = (*api.LocalVolumeSource)(unsafe.Pointer(in.Local)) - out.StorageOS = (*api.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *api.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in, out, s) -} - -func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) - out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. -func Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*api.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]api.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*api.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = api.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) - return nil -} - -// Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec is an autogenerated conversion function. 
-func Convert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *api.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeSpec_To_api_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { - out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) - if err := Convert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { - return err - } - out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) - out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) - out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) - out.StorageClassName = in.StorageClassName - out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) - return nil -} - -// Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. -func Convert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *api.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) -} - -func autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = api.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -// Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus is an autogenerated conversion function. -func Convert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *api.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_v1_PersistentVolumeStatus_To_api_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { - out.Phase = v1.PersistentVolumePhase(in.Phase) - out.Message = in.Message - out.Reason = in.Reason - return nil -} - -// Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. -func Convert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *api.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { - return autoConvert_api_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) -} - -func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -// Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *api.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_api_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - out.PdID = in.PdID - out.FSType = in.FSType - return nil -} - -// Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. -func Convert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *api.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_Pod_To_api_Pod(in *v1.Pod, out *api.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Pod_To_api_Pod is an autogenerated conversion function. -func Convert_v1_Pod_To_api_Pod(in *v1.Pod, out *api.Pod, s conversion.Scope) error { - return autoConvert_v1_Pod_To_api_Pod(in, out, s) -} - -func autoConvert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PodAffinity_To_api_PodAffinity(in *v1.PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_PodAffinity_To_api_PodAffinity is an autogenerated conversion function. -func Convert_v1_PodAffinity_To_api_PodAffinity(in *v1.PodAffinity, out *api.PodAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAffinity_To_api_PodAffinity(in, out, s) -} - -func autoConvert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. 
-func Convert_api_PodAffinity_To_v1_PodAffinity(in *api.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { - return autoConvert_api_PodAffinity_To_v1_PodAffinity(in, out, s) -} - -func autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *v1.PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -// Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm is an autogenerated conversion function. -func Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in *v1.PodAffinityTerm, out *api.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_PodAffinityTerm_To_api_PodAffinityTerm(in, out, s) -} - -func autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { - out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) - out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) - out.TopologyKey = in.TopologyKey - return nil -} - -// Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. -func Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in *api.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) -} - -func autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *v1.PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]api.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]api.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity is an autogenerated conversion function. -func Convert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in *v1.PodAntiAffinity, out *api.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_v1_PodAntiAffinity_To_api_PodAntiAffinity(in, out, s) -} - -func autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) - out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) - return nil -} - -// Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. -func Convert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in *api.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { - return autoConvert_api_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) -} - -func autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in *v1.PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -// Convert_v1_PodAttachOptions_To_api_PodAttachOptions is an autogenerated conversion function. 
-func Convert_v1_PodAttachOptions_To_api_PodAttachOptions(in *v1.PodAttachOptions, out *api.PodAttachOptions, s conversion.Scope) error { - return autoConvert_v1_PodAttachOptions_To_api_PodAttachOptions(in, out, s) -} - -func autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - return nil -} - -// Convert_api_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. -func Convert_api_PodAttachOptions_To_v1_PodAttachOptions(in *api.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { - return autoConvert_api_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) -} - -func autoConvert_v1_PodCondition_To_api_PodCondition(in *v1.PodCondition, out *api.PodCondition, s conversion.Scope) error { - out.Type = api.PodConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PodCondition_To_api_PodCondition is an autogenerated conversion function. -func Convert_v1_PodCondition_To_api_PodCondition(in *v1.PodCondition, out *api.PodCondition, s conversion.Scope) error { - return autoConvert_v1_PodCondition_To_api_PodCondition(in, out, s) -} - -func autoConvert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - out.Type = v1.PodConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastProbeTime = in.LastProbeTime - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PodCondition_To_v1_PodCondition is an autogenerated conversion function. -func Convert_api_PodCondition_To_v1_PodCondition(in *api.PodCondition, out *v1.PodCondition, s conversion.Scope) error { - return autoConvert_api_PodCondition_To_v1_PodCondition(in, out, s) -} - -func autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in *v1.PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_v1_PodExecOptions_To_api_PodExecOptions is an autogenerated conversion function. -func Convert_v1_PodExecOptions_To_api_PodExecOptions(in *v1.PodExecOptions, out *api.PodExecOptions, s conversion.Scope) error { - return autoConvert_v1_PodExecOptions_To_api_PodExecOptions(in, out, s) -} - -func autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { - out.Stdin = in.Stdin - out.Stdout = in.Stdout - out.Stderr = in.Stderr - out.TTY = in.TTY - out.Container = in.Container - out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) - return nil -} - -// Convert_api_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. 
-func Convert_api_PodExecOptions_To_v1_PodExecOptions(in *api.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { - return autoConvert_api_PodExecOptions_To_v1_PodExecOptions(in, out, s) -} - -func autoConvert_v1_PodList_To_api_PodList(in *v1.PodList, out *api.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Pod, len(*in)) - for i := range *in { - if err := Convert_v1_Pod_To_api_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodList_To_api_PodList is an autogenerated conversion function. -func Convert_v1_PodList_To_api_PodList(in *v1.PodList, out *api.PodList, s conversion.Scope) error { - return autoConvert_v1_PodList_To_api_PodList(in, out, s) -} - -func autoConvert_api_PodList_To_v1_PodList(in *api.PodList, out *v1.PodList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Pod, len(*in)) - for i := range *in { - if err := Convert_api_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PodList_To_v1_PodList is an autogenerated conversion function. -func Convert_api_PodList_To_v1_PodList(in *api.PodList, out *v1.PodList, s conversion.Scope) error { - return autoConvert_api_PodList_To_v1_PodList(in, out, s) -} - -func autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in *v1.PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -// Convert_v1_PodLogOptions_To_api_PodLogOptions is an autogenerated conversion function. -func Convert_v1_PodLogOptions_To_api_PodLogOptions(in *v1.PodLogOptions, out *api.PodLogOptions, s conversion.Scope) error { - return autoConvert_v1_PodLogOptions_To_api_PodLogOptions(in, out, s) -} - -func autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { - out.Container = in.Container - out.Follow = in.Follow - out.Previous = in.Previous - out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) - out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) - out.Timestamps = in.Timestamps - out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) - out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) - return nil -} - -// Convert_api_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. -func Convert_api_PodLogOptions_To_v1_PodLogOptions(in *api.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { - return autoConvert_api_PodLogOptions_To_v1_PodLogOptions(in, out, s) -} - -func autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions is an autogenerated conversion function. 
-func Convert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *api.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_v1_PodPortForwardOptions_To_api_PodPortForwardOptions(in, out, s) -} - -func autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { - out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) - return nil -} - -// Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. -func Convert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *api.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { - return autoConvert_api_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) -} - -func autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in *v1.PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_PodProxyOptions_To_api_PodProxyOptions is an autogenerated conversion function. -func Convert_v1_PodProxyOptions_To_api_PodProxyOptions(in *v1.PodProxyOptions, out *api.PodProxyOptions, s conversion.Scope) error { - return autoConvert_v1_PodProxyOptions_To_api_PodProxyOptions(in, out, s) -} - -func autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. -func Convert_api_PodProxyOptions_To_v1_PodProxyOptions(in *api.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { - return autoConvert_api_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) -} - -func autoConvert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { - // INFO: in.HostNetwork opted out of conversion generation - // INFO: in.HostPID opted out of conversion generation - // INFO: in.HostIPC opted out of conversion generation - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) - out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) - return nil -} - -func autoConvert_v1_PodSignature_To_api_PodSignature(in *v1.PodSignature, out *api.PodSignature, s conversion.Scope) error { - out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) - return nil -} - -// Convert_v1_PodSignature_To_api_PodSignature is an autogenerated conversion function. 
-func Convert_v1_PodSignature_To_api_PodSignature(in *v1.PodSignature, out *api.PodSignature, s conversion.Scope) error {
-	return autoConvert_v1_PodSignature_To_api_PodSignature(in, out, s)
-}
-
-func autoConvert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *v1.PodSignature, s conversion.Scope) error {
-	out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController))
-	return nil
-}
-
-// Convert_api_PodSignature_To_v1_PodSignature is an autogenerated conversion function.
-func Convert_api_PodSignature_To_v1_PodSignature(in *api.PodSignature, out *v1.PodSignature, s conversion.Scope) error {
-	return autoConvert_api_PodSignature_To_v1_PodSignature(in, out, s)
-}
-
-func autoConvert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error {
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]api.Volume, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Volume_To_api_Volume(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]api.Container, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.InitContainers = nil
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]api.Container, len(*in))
-		for i := range *in {
-			if err := Convert_v1_Container_To_api_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = api.RestartPolicy(in.RestartPolicy)
-	out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
-	out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
-	out.DNSPolicy = api.DNSPolicy(in.DNSPolicy)
-	out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
-	out.ServiceAccountName = in.ServiceAccountName
-	// INFO: in.DeprecatedServiceAccount opted out of conversion generation
-	out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
-	out.NodeName = in.NodeName
-	// INFO: in.HostNetwork opted out of conversion generation
-	// INFO: in.HostPID opted out of conversion generation
-	// INFO: in.HostIPC opted out of conversion generation
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(api.PodSecurityContext)
-		if err := Convert_v1_PodSecurityContext_To_api_PodSecurityContext(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
-	out.Hostname = in.Hostname
-	out.Subdomain = in.Subdomain
-	out.Affinity = (*api.Affinity)(unsafe.Pointer(in.Affinity))
-	out.SchedulerName = in.SchedulerName
-	out.Tolerations = *(*[]api.Toleration)(unsafe.Pointer(&in.Tolerations))
-	out.HostAliases = *(*[]api.HostAlias)(unsafe.Pointer(&in.HostAliases))
-	out.PriorityClassName = in.PriorityClassName
-	out.Priority = (*int32)(unsafe.Pointer(in.Priority))
-	return nil
-}
-
-func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error {
-	if in.Volumes != nil {
-		in, out := &in.Volumes, &out.Volumes
-		*out = make([]v1.Volume, len(*in))
-		for i := range *in {
-			if err := Convert_api_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Volumes = nil
-	}
-	if in.InitContainers != nil {
-		in, out := &in.InitContainers, &out.InitContainers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.InitContainers = nil
-	}
-	if in.Containers != nil {
-		in, out := &in.Containers, &out.Containers
-		*out = make([]v1.Container, len(*in))
-		for i := range *in {
-			if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Containers = nil
-	}
-	out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy)
-	out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds))
-	out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds))
-	out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy)
-	out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector))
-	out.ServiceAccountName = in.ServiceAccountName
-	out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken))
-	out.NodeName = in.NodeName
-	if in.SecurityContext != nil {
-		in, out := &in.SecurityContext, &out.SecurityContext
-		*out = new(v1.PodSecurityContext)
-		if err := Convert_api_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.SecurityContext = nil
-	}
-	out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets))
-	out.Hostname = in.Hostname
-	out.Subdomain = in.Subdomain
-	out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity))
-	out.SchedulerName = in.SchedulerName
-	out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations))
-	out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases))
-	out.PriorityClassName = in.PriorityClassName
-	out.Priority = (*int32)(unsafe.Pointer(in.Priority))
-	return nil
-}
-
-func autoConvert_v1_PodStatus_To_api_PodStatus(in *v1.PodStatus, out *api.PodStatus, s conversion.Scope) error {
-	out.Phase = api.PodPhase(in.Phase)
-	out.Conditions = *(*[]api.PodCondition)(unsafe.Pointer(&in.Conditions))
-	out.Message = in.Message
-	out.Reason = in.Reason
-	out.HostIP = in.HostIP
-	out.PodIP = in.PodIP
-	out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime))
-	out.InitContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses))
-	out.ContainerStatuses = *(*[]api.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses))
-	out.QOSClass = api.PodQOSClass(in.QOSClass)
-	return nil
-}
-
-// Convert_v1_PodStatus_To_api_PodStatus is an autogenerated conversion function.
-func Convert_v1_PodStatus_To_api_PodStatus(in *v1.PodStatus, out *api.PodStatus, s conversion.Scope) error { - return autoConvert_v1_PodStatus_To_api_PodStatus(in, out, s) -} - -func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - out.Phase = v1.PodPhase(in.Phase) - out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) - out.Message = in.Message - out.Reason = in.Reason - out.HostIP = in.HostIP - out.PodIP = in.PodIP - out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) - out.QOSClass = v1.PodQOSClass(in.QOSClass) - out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) - out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) - return nil -} - -// Convert_api_PodStatus_To_v1_PodStatus is an autogenerated conversion function. -func Convert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *v1.PodStatus, s conversion.Scope) error { - return autoConvert_api_PodStatus_To_v1_PodStatus(in, out, s) -} - -func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *v1.PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodStatus_To_api_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PodStatusResult_To_api_PodStatusResult is an autogenerated conversion function. -func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *v1.PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error { - return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s) -} - -func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. -func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { - return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s) -} - -func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *v1.PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PodTemplate_To_api_PodTemplate is an autogenerated conversion function. -func Convert_v1_PodTemplate_To_api_PodTemplate(in *v1.PodTemplate, out *api.PodTemplate, s conversion.Scope) error { - return autoConvert_v1_PodTemplate_To_api_PodTemplate(in, out, s) -} - -func autoConvert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { - return err - } - return nil -} - -// Convert_api_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. 
-func Convert_api_PodTemplate_To_v1_PodTemplate(in *api.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { - return autoConvert_api_PodTemplate_To_v1_PodTemplate(in, out, s) -} - -func autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in *v1.PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_v1_PodTemplate_To_api_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_PodTemplateList_To_api_PodTemplateList is an autogenerated conversion function. -func Convert_v1_PodTemplateList_To_api_PodTemplateList(in *v1.PodTemplateList, out *api.PodTemplateList, s conversion.Scope) error { - return autoConvert_v1_PodTemplateList_To_api_PodTemplateList(in, out, s) -} - -func autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.PodTemplate, len(*in)) - for i := range *in { - if err := Convert_api_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. -func Convert_api_PodTemplateList_To_v1_PodTemplateList(in *api.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { - return autoConvert_api_PodTemplateList_To_v1_PodTemplateList(in, out, s) -} - -func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_PodSpec_To_api_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource is an autogenerated conversion function. -func Convert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *api.PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_v1_PortworxVolumeSource_To_api_PortworxVolumeSource(in, out, s) -} - -func autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { - out.VolumeID = in.VolumeID - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. 
-func Convert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *api.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { - return autoConvert_api_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) -} - -func autoConvert_v1_Preconditions_To_api_Preconditions(in *v1.Preconditions, out *api.Preconditions, s conversion.Scope) error { - out.UID = (*types.UID)(unsafe.Pointer(in.UID)) - return nil -} - -// Convert_v1_Preconditions_To_api_Preconditions is an autogenerated conversion function. -func Convert_v1_Preconditions_To_api_Preconditions(in *v1.Preconditions, out *api.Preconditions, s conversion.Scope) error { - return autoConvert_v1_Preconditions_To_api_Preconditions(in, out, s) -} - -func autoConvert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *v1.Preconditions, s conversion.Scope) error { - out.UID = (*types.UID)(unsafe.Pointer(in.UID)) - return nil -} - -// Convert_api_Preconditions_To_v1_Preconditions is an autogenerated conversion function. -func Convert_api_Preconditions_To_v1_Preconditions(in *api.Preconditions, out *v1.Preconditions, s conversion.Scope) error { - return autoConvert_api_Preconditions_To_v1_Preconditions(in, out, s) -} - -func autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { - if err := Convert_v1_PodSignature_To_api_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { - return err - } - out.EvictionTime = in.EvictionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry is an autogenerated conversion function. -func Convert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *api.PreferAvoidPodsEntry, s conversion.Scope) error { - return autoConvert_v1_PreferAvoidPodsEntry_To_api_PreferAvoidPodsEntry(in, out, s) -} - -func autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { - if err := Convert_api_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { - return err - } - out.EvictionTime = in.EvictionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. -func Convert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *api.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { - return autoConvert_api_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) -} - -func autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_NodeSelectorTerm_To_api_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm is an autogenerated conversion function. 
-func Convert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *api.PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_v1_PreferredSchedulingTerm_To_api_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { - return err - } - return nil -} - -// Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. -func Convert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *api.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { - return autoConvert_api_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) -} - -func autoConvert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error { - if err := Convert_v1_Handler_To_api_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -// Convert_v1_Probe_To_api_Probe is an autogenerated conversion function. -func Convert_v1_Probe_To_api_Probe(in *v1.Probe, out *api.Probe, s conversion.Scope) error { - return autoConvert_v1_Probe_To_api_Probe(in, out, s) -} - -func autoConvert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error { - if err := Convert_api_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { - return err - } - out.InitialDelaySeconds = in.InitialDelaySeconds - out.TimeoutSeconds = in.TimeoutSeconds - out.PeriodSeconds = in.PeriodSeconds - out.SuccessThreshold = in.SuccessThreshold - out.FailureThreshold = in.FailureThreshold - return nil -} - -// Convert_api_Probe_To_v1_Probe is an autogenerated conversion function. -func Convert_api_Probe_To_v1_Probe(in *api.Probe, out *v1.Probe, s conversion.Scope) error { - return autoConvert_api_Probe_To_v1_Probe(in, out, s) -} - -func autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]api.VolumeProjection)(unsafe.Pointer(&in.Sources)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource is an autogenerated conversion function. -func Convert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *api.ProjectedVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ProjectedVolumeSource_To_api_ProjectedVolumeSource(in, out, s) -} - -func autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { - out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - return nil -} - -// Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. 
-func Convert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *api.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { - return autoConvert_api_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) -} - -func autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group - return nil -} - -// Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource is an autogenerated conversion function. -func Convert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *api.QuobyteVolumeSource, s conversion.Scope) error { - return autoConvert_v1_QuobyteVolumeSource_To_api_QuobyteVolumeSource(in, out, s) -} - -func autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { - out.Registry = in.Registry - out.Volume = in.Volume - out.ReadOnly = in.ReadOnly - out.User = in.User - out.Group = in.Group - return nil -} - -// Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. -func Convert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *api.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { - return autoConvert_api_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) -} - -func autoConvert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *api.RBDPersistentVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource is an autogenerated conversion function. -func Convert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *api.RBDPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_RBDPersistentVolumeSource_To_api_RBDPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *api.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *api.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource is an autogenerated conversion function. -func Convert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in *v1.RBDVolumeSource, out *api.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_v1_RBDVolumeSource_To_api_RBDVolumeSource(in, out, s) -} - -func autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { - out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) - out.RBDImage = in.RBDImage - out.FSType = in.FSType - out.RBDPool = in.RBDPool - out.RadosUser = in.RadosUser - out.Keyring = in.Keyring - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. -func Convert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in *api.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { - return autoConvert_api_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) -} - -func autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in *v1.RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Range = in.Range - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_v1_RangeAllocation_To_api_RangeAllocation is an autogenerated conversion function. -func Convert_v1_RangeAllocation_To_api_RangeAllocation(in *v1.RangeAllocation, out *api.RangeAllocation, s conversion.Scope) error { - return autoConvert_v1_RangeAllocation_To_api_RangeAllocation(in, out, s) -} - -func autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Range = in.Range - out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) - return nil -} - -// Convert_api_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. 
-func Convert_api_RangeAllocation_To_v1_RangeAllocation(in *api.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { - return autoConvert_api_RangeAllocation_To_v1_RangeAllocation(in, out, s) -} - -func autoConvert_v1_ReplicationController_To_api_ReplicationController(in *v1.ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ReplicationController_To_api_ReplicationController is an autogenerated conversion function. -func Convert_v1_ReplicationController_To_api_ReplicationController(in *v1.ReplicationController, out *api.ReplicationController, s conversion.Scope) error { - return autoConvert_v1_ReplicationController_To_api_ReplicationController(in, out, s) -} - -func autoConvert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. -func Convert_api_ReplicationController_To_v1_ReplicationController(in *api.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { - return autoConvert_api_ReplicationController_To_v1_ReplicationController(in, out, s) -} - -func autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { - out.Type = api.ReplicationControllerConditionType(in.Type) - out.Status = api.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition is an autogenerated conversion function. -func Convert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *api.ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerCondition_To_api_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { - out.Type = v1.ReplicationControllerConditionType(in.Type) - out.Status = v1.ConditionStatus(in.Status) - out.LastTransitionTime = in.LastTransitionTime - out.Reason = in.Reason - out.Message = in.Message - return nil -} - -// Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *api.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) -} - -func autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *v1.ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_v1_ReplicationController_To_api_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList is an autogenerated conversion function. -func Convert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in *v1.ReplicationControllerList, out *api.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerList_To_api_ReplicationControllerList(in, out, s) -} - -func autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.ReplicationController, len(*in)) - for i := range *in { - if err := Convert_api_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in *api.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) -} - -func autoConvert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { - if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { - return err - } - out.MinReadySeconds = in.MinReadySeconds - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { - return err - } - } else { - out.Template = nil - } - return nil -} - -func autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]api.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus is an autogenerated conversion function. -func Convert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *api.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_v1_ReplicationControllerStatus_To_api_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - out.Replicas = in.Replicas - out.FullyLabeledReplicas = in.FullyLabeledReplicas - out.ReadyReplicas = in.ReadyReplicas - out.AvailableReplicas = in.AvailableReplicas - out.ObservedGeneration = in.ObservedGeneration - out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) - return nil -} - -// Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. 
-func Convert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *api.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { - return autoConvert_api_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) -} - -func autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -// Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector is an autogenerated conversion function. -func Convert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *api.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_v1_ResourceFieldSelector_To_api_ResourceFieldSelector(in, out, s) -} - -func autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { - out.ContainerName = in.ContainerName - out.Resource = in.Resource - out.Divisor = in.Divisor - return nil -} - -// Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. -func Convert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *api.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { - return autoConvert_api_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) -} - -func autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in *v1.ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ResourceQuota_To_api_ResourceQuota is an autogenerated conversion function. -func Convert_v1_ResourceQuota_To_api_ResourceQuota(in *v1.ResourceQuota, out *api.ResourceQuota, s conversion.Scope) error { - return autoConvert_v1_ResourceQuota_To_api_ResourceQuota(in, out, s) -} - -func autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. -func Convert_api_ResourceQuota_To_v1_ResourceQuota(in *api.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { - return autoConvert_api_ResourceQuota_To_v1_ResourceQuota(in, out, s) -} - -func autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *v1.ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList is an autogenerated conversion function. 
-func Convert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in *v1.ResourceQuotaList, out *api.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaList_To_api_ResourceQuotaList(in, out, s) -} - -func autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. -func Convert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in *api.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) -} - -func autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]api.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *api.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaSpec_To_api_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) - return nil -} - -// Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. -func Convert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *api.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) -} - -func autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*api.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*api.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -// Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus is an autogenerated conversion function. -func Convert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *api.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_v1_ResourceQuotaStatus_To_api_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) - out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) - return nil -} - -// Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. 
-func Convert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *api.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { - return autoConvert_api_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) -} - -func autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*api.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*api.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -// Convert_v1_ResourceRequirements_To_api_ResourceRequirements is an autogenerated conversion function. -func Convert_v1_ResourceRequirements_To_api_ResourceRequirements(in *v1.ResourceRequirements, out *api.ResourceRequirements, s conversion.Scope) error { - return autoConvert_v1_ResourceRequirements_To_api_ResourceRequirements(in, out, s) -} - -func autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) - out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) - return nil -} - -// Convert_api_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. -func Convert_api_ResourceRequirements_To_v1_ResourceRequirements(in *api.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { - return autoConvert_api_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) -} - -func autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -// Convert_v1_SELinuxOptions_To_api_SELinuxOptions is an autogenerated conversion function. -func Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in *v1.SELinuxOptions, out *api.SELinuxOptions, s conversion.Scope) error { - return autoConvert_v1_SELinuxOptions_To_api_SELinuxOptions(in, out, s) -} - -func autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { - out.User = in.User - out.Role = in.Role - out.Type = in.Type - out.Level = in.Level - return nil -} - -// Convert_api_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. -func Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in *api.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { - return autoConvert_api_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) -} - -func autoConvert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *api.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*api.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *api.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ScaleIOPersistentVolumeSource_To_api_ScaleIOPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *api.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *api.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource is an autogenerated conversion function. -func Convert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *api.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_v1_ScaleIOVolumeSource_To_api_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { - out.Gateway = in.Gateway - out.System = in.System - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - out.SSLEnabled = in.SSLEnabled - out.ProtectionDomain = in.ProtectionDomain - out.StoragePool = in.StoragePool - out.StorageMode = in.StorageMode - out.VolumeName = in.VolumeName - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - return nil -} - -// Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. 
-func Convert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *api.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { - return autoConvert_api_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) -} - -func autoConvert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - // INFO: in.StringData opted out of conversion generation - out.Type = api.SecretType(in.Type) - return nil -} - -func autoConvert_api_Secret_To_v1_Secret(in *api.Secret, out *v1.Secret, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) - out.Type = v1.SecretType(in.Type) - return nil -} - -// Convert_api_Secret_To_v1_Secret is an autogenerated conversion function. -func Convert_api_Secret_To_v1_Secret(in *api.Secret, out *v1.Secret, s conversion.Scope) error { - return autoConvert_api_Secret_To_v1_Secret(in, out, s) -} - -func autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in *v1.SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretEnvSource_To_api_SecretEnvSource is an autogenerated conversion function. -func Convert_v1_SecretEnvSource_To_api_SecretEnvSource(in *v1.SecretEnvSource, out *api.SecretEnvSource, s conversion.Scope) error { - return autoConvert_v1_SecretEnvSource_To_api_SecretEnvSource(in, out, s) -} - -func autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. -func Convert_api_SecretEnvSource_To_v1_SecretEnvSource(in *api.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { - return autoConvert_api_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) -} - -func autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretKeySelector_To_api_SecretKeySelector is an autogenerated conversion function. 
-func Convert_v1_SecretKeySelector_To_api_SecretKeySelector(in *v1.SecretKeySelector, out *api.SecretKeySelector, s conversion.Scope) error { - return autoConvert_v1_SecretKeySelector_To_api_SecretKeySelector(in, out, s) -} - -func autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Key = in.Key - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. -func Convert_api_SecretKeySelector_To_v1_SecretKeySelector(in *api.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { - return autoConvert_api_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) -} - -func autoConvert_v1_SecretList_To_api_SecretList(in *v1.SecretList, out *api.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Secret, len(*in)) - for i := range *in { - if err := Convert_v1_Secret_To_api_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_SecretList_To_api_SecretList is an autogenerated conversion function. -func Convert_v1_SecretList_To_api_SecretList(in *v1.SecretList, out *api.SecretList, s conversion.Scope) error { - return autoConvert_v1_SecretList_To_api_SecretList(in, out, s) -} - -func autoConvert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *v1.SecretList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Secret, len(*in)) - for i := range *in { - if err := Convert_api_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_SecretList_To_v1_SecretList is an autogenerated conversion function. -func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *v1.SecretList, s conversion.Scope) error { - return autoConvert_api_SecretList_To_v1_SecretList(in, out, s) -} - -func autoConvert_v1_SecretProjection_To_api_SecretProjection(in *v1.SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - if err := Convert_v1_LocalObjectReference_To_api_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretProjection_To_api_SecretProjection is an autogenerated conversion function. 
-func Convert_v1_SecretProjection_To_api_SecretProjection(in *v1.SecretProjection, out *api.SecretProjection, s conversion.Scope) error { - return autoConvert_v1_SecretProjection_To_api_SecretProjection(in, out, s) -} - -func autoConvert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { - if err := Convert_api_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { - return err - } - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. -func Convert_api_SecretProjection_To_v1_SecretProjection(in *api.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { - return autoConvert_api_SecretProjection_To_v1_SecretProjection(in, out, s) -} - -func autoConvert_v1_SecretReference_To_api_SecretReference(in *v1.SecretReference, out *api.SecretReference, s conversion.Scope) error { - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_v1_SecretReference_To_api_SecretReference is an autogenerated conversion function. -func Convert_v1_SecretReference_To_api_SecretReference(in *v1.SecretReference, out *api.SecretReference, s conversion.Scope) error { - return autoConvert_v1_SecretReference_To_api_SecretReference(in, out, s) -} - -func autoConvert_api_SecretReference_To_v1_SecretReference(in *api.SecretReference, out *v1.SecretReference, s conversion.Scope) error { - out.Name = in.Name - out.Namespace = in.Namespace - return nil -} - -// Convert_api_SecretReference_To_v1_SecretReference is an autogenerated conversion function. -func Convert_api_SecretReference_To_v1_SecretReference(in *api.SecretReference, out *v1.SecretReference, s conversion.Scope) error { - return autoConvert_api_SecretReference_To_v1_SecretReference(in, out, s) -} - -func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]api.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource is an autogenerated conversion function. -func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *v1.SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in, out, s) -} - -func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { - out.SecretName = in.SecretName - out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) - out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) - out.Optional = (*bool)(unsafe.Pointer(in.Optional)) - return nil -} - -// Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. 
-func Convert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { - return autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) -} - -func autoConvert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*api.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*api.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - return nil -} - -// Convert_v1_SecurityContext_To_api_SecurityContext is an autogenerated conversion function. -func Convert_v1_SecurityContext_To_api_SecurityContext(in *v1.SecurityContext, out *api.SecurityContext, s conversion.Scope) error { - return autoConvert_v1_SecurityContext_To_api_SecurityContext(in, out, s) -} - -func autoConvert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { - out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) - out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) - out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) - out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) - out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) - out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) - out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) - return nil -} - -func autoConvert_v1_SerializedReference_To_api_SerializedReference(in *v1.SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - if err := Convert_v1_ObjectReference_To_api_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -// Convert_v1_SerializedReference_To_api_SerializedReference is an autogenerated conversion function. -func Convert_v1_SerializedReference_To_api_SerializedReference(in *v1.SerializedReference, out *api.SerializedReference, s conversion.Scope) error { - return autoConvert_v1_SerializedReference_To_api_SerializedReference(in, out, s) -} - -func autoConvert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { - if err := Convert_api_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { - return err - } - return nil -} - -// Convert_api_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. 
-func Convert_api_SerializedReference_To_v1_SerializedReference(in *api.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { - return autoConvert_api_SerializedReference_To_v1_SerializedReference(in, out, s) -} - -func autoConvert_v1_Service_To_api_Service(in *v1.Service, out *api.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1_ServiceSpec_To_api_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_v1_ServiceStatus_To_api_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Service_To_api_Service is an autogenerated conversion function. -func Convert_v1_Service_To_api_Service(in *v1.Service, out *api.Service, s conversion.Scope) error { - return autoConvert_v1_Service_To_api_Service(in, out, s) -} - -func autoConvert_api_Service_To_v1_Service(in *api.Service, out *v1.Service, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_api_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - if err := Convert_api_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { - return err - } - return nil -} - -// Convert_api_Service_To_v1_Service is an autogenerated conversion function. -func Convert_api_Service_To_v1_Service(in *api.Service, out *v1.Service, s conversion.Scope) error { - return autoConvert_api_Service_To_v1_Service(in, out, s) -} - -func autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in *v1.ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]api.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]api.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -// Convert_v1_ServiceAccount_To_api_ServiceAccount is an autogenerated conversion function. -func Convert_v1_ServiceAccount_To_api_ServiceAccount(in *v1.ServiceAccount, out *api.ServiceAccount, s conversion.Scope) error { - return autoConvert_v1_ServiceAccount_To_api_ServiceAccount(in, out, s) -} - -func autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) - out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) - out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) - return nil -} - -// Convert_api_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. -func Convert_api_ServiceAccount_To_v1_ServiceAccount(in *api.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { - return autoConvert_api_ServiceAccount_To_v1_ServiceAccount(in, out, s) -} - -func autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in *v1.ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]api.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1_ServiceAccountList_To_api_ServiceAccountList is an autogenerated conversion function. 
-func Convert_v1_ServiceAccountList_To_api_ServiceAccountList(in *v1.ServiceAccountList, out *api.ServiceAccountList, s conversion.Scope) error { - return autoConvert_v1_ServiceAccountList_To_api_ServiceAccountList(in, out, s) -} - -func autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_api_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. -func Convert_api_ServiceAccountList_To_v1_ServiceAccountList(in *api.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { - return autoConvert_api_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) -} - -func autoConvert_v1_ServiceList_To_api_ServiceList(in *v1.ServiceList, out *api.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]api.Service, len(*in)) - for i := range *in { - if err := Convert_v1_Service_To_api_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_v1_ServiceList_To_api_ServiceList is an autogenerated conversion function. -func Convert_v1_ServiceList_To_api_ServiceList(in *v1.ServiceList, out *api.ServiceList, s conversion.Scope) error { - return autoConvert_v1_ServiceList_To_api_ServiceList(in, out, s) -} - -func autoConvert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *v1.ServiceList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]v1.Service, len(*in)) - for i := range *in { - if err := Convert_api_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Items = nil - } - return nil -} - -// Convert_api_ServiceList_To_v1_ServiceList is an autogenerated conversion function. -func Convert_api_ServiceList_To_v1_ServiceList(in *api.ServiceList, out *v1.ServiceList, s conversion.Scope) error { - return autoConvert_api_ServiceList_To_v1_ServiceList(in, out, s) -} - -func autoConvert_v1_ServicePort_To_api_ServicePort(in *v1.ServicePort, out *api.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = api.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -// Convert_v1_ServicePort_To_api_ServicePort is an autogenerated conversion function. -func Convert_v1_ServicePort_To_api_ServicePort(in *v1.ServicePort, out *api.ServicePort, s conversion.Scope) error { - return autoConvert_v1_ServicePort_To_api_ServicePort(in, out, s) -} - -func autoConvert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *v1.ServicePort, s conversion.Scope) error { - out.Name = in.Name - out.Protocol = v1.Protocol(in.Protocol) - out.Port = in.Port - out.TargetPort = in.TargetPort - out.NodePort = in.NodePort - return nil -} - -// Convert_api_ServicePort_To_v1_ServicePort is an autogenerated conversion function. 
-func Convert_api_ServicePort_To_v1_ServicePort(in *api.ServicePort, out *v1.ServicePort, s conversion.Scope) error { - return autoConvert_api_ServicePort_To_v1_ServicePort(in, out, s) -} - -func autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions is an autogenerated conversion function. -func Convert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *api.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_v1_ServiceProxyOptions_To_api_ServiceProxyOptions(in, out, s) -} - -func autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { - out.Path = in.Path - return nil -} - -// Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. -func Convert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *api.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { - return autoConvert_api_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) -} - -func autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in *v1.ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - out.Ports = *(*[]api.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.Type = api.ServiceType(in.Type) - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.SessionAffinity = api.ServiceAffinity(in.SessionAffinity) - out.LoadBalancerIP = in.LoadBalancerIP - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalName = in.ExternalName - out.ExternalTrafficPolicy = api.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) - out.HealthCheckNodePort = in.HealthCheckNodePort - out.PublishNotReadyAddresses = in.PublishNotReadyAddresses - out.SessionAffinityConfig = (*api.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) - return nil -} - -// Convert_v1_ServiceSpec_To_api_ServiceSpec is an autogenerated conversion function. -func Convert_v1_ServiceSpec_To_api_ServiceSpec(in *v1.ServiceSpec, out *api.ServiceSpec, s conversion.Scope) error { - return autoConvert_v1_ServiceSpec_To_api_ServiceSpec(in, out, s) -} - -func autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - out.Type = v1.ServiceType(in.Type) - out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) - out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) - out.ClusterIP = in.ClusterIP - out.ExternalName = in.ExternalName - out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) - out.LoadBalancerIP = in.LoadBalancerIP - out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) - out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) - out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) - out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) - out.HealthCheckNodePort = in.HealthCheckNodePort - out.PublishNotReadyAddresses = in.PublishNotReadyAddresses - return nil -} - -// Convert_api_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. 
-func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { - return autoConvert_api_ServiceSpec_To_v1_ServiceSpec(in, out, s) -} - -func autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in *v1.ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - if err := Convert_v1_LoadBalancerStatus_To_api_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -// Convert_v1_ServiceStatus_To_api_ServiceStatus is an autogenerated conversion function. -func Convert_v1_ServiceStatus_To_api_ServiceStatus(in *v1.ServiceStatus, out *api.ServiceStatus, s conversion.Scope) error { - return autoConvert_v1_ServiceStatus_To_api_ServiceStatus(in, out, s) -} - -func autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { - if err := Convert_api_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { - return err - } - return nil -} - -// Convert_api_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. -func Convert_api_ServiceStatus_To_v1_ServiceStatus(in *api.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { - return autoConvert_api_ServiceStatus_To_v1_ServiceStatus(in, out, s) -} - -func autoConvert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *api.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*api.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) - return nil -} - -// Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig is an autogenerated conversion function. -func Convert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *api.SessionAffinityConfig, s conversion.Scope) error { - return autoConvert_v1_SessionAffinityConfig_To_api_SessionAffinityConfig(in, out, s) -} - -func autoConvert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *api.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) - return nil -} - -// Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. -func Convert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *api.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { - return autoConvert_api_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) -} - -func autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*api.ObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource is an autogenerated conversion function. 
-func Convert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *api.StorageOSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_v1_StorageOSPersistentVolumeSource_To_api_StorageOSPersistentVolumeSource(in, out, s) -} - -func autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. -func Convert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *api.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { - return autoConvert_api_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) -} - -func autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*api.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource is an autogenerated conversion function. -func Convert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *api.StorageOSVolumeSource, s conversion.Scope) error { - return autoConvert_v1_StorageOSVolumeSource_To_api_StorageOSVolumeSource(in, out, s) -} - -func autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { - out.VolumeName = in.VolumeName - out.VolumeNamespace = in.VolumeNamespace - out.FSType = in.FSType - out.ReadOnly = in.ReadOnly - out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) - return nil -} - -// Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. -func Convert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *api.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { - return autoConvert_api_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) -} - -func autoConvert_v1_Sysctl_To_api_Sysctl(in *v1.Sysctl, out *api.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_v1_Sysctl_To_api_Sysctl is an autogenerated conversion function. -func Convert_v1_Sysctl_To_api_Sysctl(in *v1.Sysctl, out *api.Sysctl, s conversion.Scope) error { - return autoConvert_v1_Sysctl_To_api_Sysctl(in, out, s) -} - -func autoConvert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *v1.Sysctl, s conversion.Scope) error { - out.Name = in.Name - out.Value = in.Value - return nil -} - -// Convert_api_Sysctl_To_v1_Sysctl is an autogenerated conversion function. 
-func Convert_api_Sysctl_To_v1_Sysctl(in *api.Sysctl, out *v1.Sysctl, s conversion.Scope) error { - return autoConvert_api_Sysctl_To_v1_Sysctl(in, out, s) -} - -func autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - out.Host = in.Host - return nil -} - -// Convert_v1_TCPSocketAction_To_api_TCPSocketAction is an autogenerated conversion function. -func Convert_v1_TCPSocketAction_To_api_TCPSocketAction(in *v1.TCPSocketAction, out *api.TCPSocketAction, s conversion.Scope) error { - return autoConvert_v1_TCPSocketAction_To_api_TCPSocketAction(in, out, s) -} - -func autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { - out.Port = in.Port - out.Host = in.Host - return nil -} - -// Convert_api_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. -func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { - return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) -} - -func autoConvert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) - return nil -} - -// Convert_v1_Taint_To_api_Taint is an autogenerated conversion function. -func Convert_v1_Taint_To_api_Taint(in *v1.Taint, out *api.Taint, s conversion.Scope) error { - return autoConvert_v1_Taint_To_api_Taint(in, out, s) -} - -func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversion.Scope) error { - out.Key = in.Key - out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) - out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) - return nil -} - -// Convert_api_Taint_To_v1_Taint is an autogenerated conversion function. -func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *v1.Taint, s conversion.Scope) error { - return autoConvert_api_Taint_To_v1_Taint(in, out, s) -} - -func autoConvert_v1_Toleration_To_api_Toleration(in *v1.Toleration, out *api.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = api.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = api.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -// Convert_v1_Toleration_To_api_Toleration is an autogenerated conversion function. -func Convert_v1_Toleration_To_api_Toleration(in *v1.Toleration, out *api.Toleration, s conversion.Scope) error { - return autoConvert_v1_Toleration_To_api_Toleration(in, out, s) -} - -func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *v1.Toleration, s conversion.Scope) error { - out.Key = in.Key - out.Operator = v1.TolerationOperator(in.Operator) - out.Value = in.Value - out.Effect = v1.TaintEffect(in.Effect) - out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) - return nil -} - -// Convert_api_Toleration_To_v1_Toleration is an autogenerated conversion function. 
-func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *v1.Toleration, s conversion.Scope) error { - return autoConvert_api_Toleration_To_v1_Toleration(in, out, s) -} - -func autoConvert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_v1_VolumeSource_To_api_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Volume_To_api_Volume is an autogenerated conversion function. -func Convert_v1_Volume_To_api_Volume(in *v1.Volume, out *api.Volume, s conversion.Scope) error { - return autoConvert_v1_Volume_To_api_Volume(in, out, s) -} - -func autoConvert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error { - out.Name = in.Name - if err := Convert_api_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { - return err - } - return nil -} - -// Convert_api_Volume_To_v1_Volume is an autogenerated conversion function. -func Convert_api_Volume_To_v1_Volume(in *api.Volume, out *v1.Volume, s conversion.Scope) error { - return autoConvert_api_Volume_To_v1_Volume(in, out, s) -} - -func autoConvert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - out.MountPropagation = (*api.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) - return nil -} - -// Convert_v1_VolumeMount_To_api_VolumeMount is an autogenerated conversion function. -func Convert_v1_VolumeMount_To_api_VolumeMount(in *v1.VolumeMount, out *api.VolumeMount, s conversion.Scope) error { - return autoConvert_v1_VolumeMount_To_api_VolumeMount(in, out, s) -} - -func autoConvert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { - out.Name = in.Name - out.ReadOnly = in.ReadOnly - out.MountPath = in.MountPath - out.SubPath = in.SubPath - out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) - return nil -} - -// Convert_api_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. -func Convert_api_VolumeMount_To_v1_VolumeMount(in *api.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { - return autoConvert_api_VolumeMount_To_v1_VolumeMount(in, out, s) -} - -func autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in *v1.VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - out.Secret = (*api.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*api.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*api.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -// Convert_v1_VolumeProjection_To_api_VolumeProjection is an autogenerated conversion function. 
-func Convert_v1_VolumeProjection_To_api_VolumeProjection(in *v1.VolumeProjection, out *api.VolumeProjection, s conversion.Scope) error { - return autoConvert_v1_VolumeProjection_To_api_VolumeProjection(in, out, s) -} - -func autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) - out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) - out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) - return nil -} - -// Convert_api_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. -func Convert_api_VolumeProjection_To_v1_VolumeProjection(in *api.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { - return autoConvert_api_VolumeProjection_To_v1_VolumeProjection(in, out, s) -} - -func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - out.HostPath = (*api.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*api.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*api.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*api.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*api.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*api.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*api.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*api.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*api.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*api.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*api.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.FlexVolume = (*api.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*api.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*api.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*api.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*api.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*api.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*api.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*api.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*api.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.Quobyte = (*api.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.AzureDisk = (*api.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*api.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*api.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*api.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*api.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*api.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_v1_VolumeSource_To_api_VolumeSource is an autogenerated conversion function. 
-func Convert_v1_VolumeSource_To_api_VolumeSource(in *v1.VolumeSource, out *api.VolumeSource, s conversion.Scope) error { - return autoConvert_v1_VolumeSource_To_api_VolumeSource(in, out, s) -} - -func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) - out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) - out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) - out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) - out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) - out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) - out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) - out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) - out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) - out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) - out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) - out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) - out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) - out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) - out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) - out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) - out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) - out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) - out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) - out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) - out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) - out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) - out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) - out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) - out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) - out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) - out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) - return nil -} - -// Convert_api_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. -func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { - return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s) -} - -func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - out.StoragePolicyName = in.StoragePolicyName - out.StoragePolicyID = in.StoragePolicyID - return nil -} - -// Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
-func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - out.VolumePath = in.VolumePath - out.FSType = in.FSType - out.StoragePolicyName = in.StoragePolicyName - out.StoragePolicyID = in.StoragePolicyID - return nil -} - -// Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. -func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { - return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) -} - -func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -// Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in, out, s) -} - -func autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { - out.Weight = in.Weight - if err := Convert_api_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { - return err - } - return nil -} - -// Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. -func Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *api.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { - return autoConvert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go b/vendor/k8s.io/kubernetes/pkg/api/validation/events.go deleted file mode 100644 index 4aec88d5..00000000 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/events.go +++ /dev/null @@ -1,84 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package validation - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/legacyscheme" -) - -// ValidateEvent makes sure that the event makes sense. -func ValidateEvent(event *api.Event) field.ErrorList { - allErrs := field.ErrorList{} - - // Make sure event.Namespace and the involvedObject.Namespace agree - if len(event.InvolvedObject.Namespace) == 0 { - // event.Namespace must also be empty (or "default", for compatibility with old clients) - if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } else { - // event namespace must match - if event.Namespace != event.InvolvedObject.Namespace { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) - } - } - - // For kinds we recognize, make sure involvedObject.Namespace is set for namespaced kinds - if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { - if namespaced && len(event.InvolvedObject.Namespace) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) - } - if !namespaced && len(event.InvolvedObject.Namespace) > 0 { - allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) - } - } - - for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { - allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) - } - return allErrs -} - -// Check whether the kind in groupVersion is scoped at the root of the api hierarchy -func isNamespacedKind(kind, groupVersion string) (bool, error) { - gv, err := schema.ParseGroupVersion(groupVersion) - if err != nil { - return false, err - } - g, err := legacyscheme.Registry.Group(gv.Group) - if err != nil { - return false, err - } - - restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version) - if err != nil { - return false, err - } - scopeName := restMapping.Scope.Name() - if scopeName == meta.RESTScopeNameNamespace { - return true, nil - } - return false, nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go new file mode 100644 index 00000000..ccf03453 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/annotations.go @@ -0,0 +1,34 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +// MetricSpecsAnnotation is the annotation which holds non-CPU-utilization HPA metric +// specs when converting the `Metrics` field from autoscaling/v2beta1 +const MetricSpecsAnnotation = "autoscaling.alpha.kubernetes.io/metrics" + +// MetricStatusesAnnotation is the annotation which holds non-CPU-utilization HPA metric +// statuses when converting the `CurrentMetrics` field from autoscaling/v2beta1 +const MetricStatusesAnnotation = "autoscaling.alpha.kubernetes.io/current-metrics" + +// HorizontalPodAutoscalerConditionsAnnotation is the annotation which holds the conditions +// of an HPA when converting the `Conditions` field from autoscaling/v2beta1 +const HorizontalPodAutoscalerConditionsAnnotation = "autoscaling.alpha.kubernetes.io/conditions" + +// DefaultCPUUtilization is the default value for CPU utilization, provided no other +// metrics are present. This is here because it's used by both the v2beta1 defaulting +// logic, and the pseudo-defaulting done in v1 conversion. +const DefaultCPUUtilization = 80 diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go new file mode 100644 index 00000000..7c91aac8 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package autoscaling // import "k8s.io/kubernetes/pkg/apis/autoscaling" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go new file mode 100644 index 00000000..6c321a3a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
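As an editorial aside (not part of the vendored annotations.go above), the constants it defines only matter while round-tripping HPA objects between API versions. The snippet below is a hedged, illustrative sketch of how a caller might consult them: if the alpha metrics annotation is absent, the pseudo-default CPU utilization target applies. The helper name and the bare annotation map are invented for the example.

```
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

// defaultCPUTarget is a hypothetical helper: it reports the pseudo-default CPU
// utilization target unless explicit metric specs were stashed in the alpha
// metrics annotation during conversion.
func defaultCPUTarget(annotations map[string]string) (int32, bool) {
	if _, ok := annotations[autoscaling.MetricSpecsAnnotation]; ok {
		// Explicit metrics are present; no pseudo-default is applied.
		return 0, false
	}
	return autoscaling.DefaultCPUUtilization, true
}

func main() {
	if target, ok := defaultCPUTarget(map[string]string{}); ok {
		fmt.Printf("falling back to %d%% CPU utilization\n", target)
	}
}
```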
+*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Scale{}, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + return nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go new file mode 100644 index 00000000..0e9d669a --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/types.go @@ -0,0 +1,363 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package autoscaling + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + api "k8s.io/kubernetes/pkg/apis/core" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. 
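As an aside (not part of the vendored register.go above), the registration plumbing is typically consumed as in the hedged sketch below: install the internal autoscaling types into a fresh runtime.Scheme, then use the Kind and Resource helpers to build group-qualified identifiers.

```
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers Scale, HorizontalPodAutoscaler and
	// HorizontalPodAutoscalerList under the internal group version.
	if err := autoscaling.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// Kind and Resource qualify unqualified names with the autoscaling group.
	fmt.Println(autoscaling.Kind("HorizontalPodAutoscaler"))
	fmt.Println(autoscaling.Resource("horizontalpodautoscalers"))
}
```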
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector string +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" + Kind string + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string + // API version of the referent + // +optional + APIVersion string +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // ScaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference + // MinReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. + // +optional + MinReplicas *int32 + // MaxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 + // Metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +var ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // Type is the type of metric source. It should match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. 
+ // +optional + Pods *PodsMetricSource + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // TargetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // MetricName is the name of the metric in question + MetricName string + // TargetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // Name is the name of the resource in question. + Name api.ResourceName + // TargetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 + // TargetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // ObservedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 + + // LastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time + + // CurrentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 + + // DesiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 + + // CurrentMetrics is the last read state of the metrics used by this autoscaler. 
+ CurrentMetrics []MetricStatus + + // Conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + Conditions []HorizontalPodAutoscalerCondition +} + +// ConditionStatus indicates the status of a condition (true, false, or unknown). +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition; +// "ConditionFalse" means a resource is not in the condition; "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +var ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // Type describes the current condition + Type HorizontalPodAutoscalerConditionType + // Status is the status of the condition (True, False, Unknown) + Status ConditionStatus + // LastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time + // Reason is the reason for the condition's last transition. + // +optional + Reason string + // Message is a human-readable explanation containing details about + // the transition + // +optional + Message string +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // Type is the type of metric source. It will match one of the fields below. + Type MetricSourceType + + // Object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus + // Pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus + // Resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. 
+ // +optional + Resource *ResourceMetricStatus +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // Target is the described Kubernetes object. + Target CrossVersionObjectReference + + // MetricName is the name of the metric in question. + MetricName string + // CurrentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // MetricName is the name of the metric in question + MetricName string + // CurrentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name api.ResourceName + // CurrentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 + // CurrentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta + // Metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // Spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec + + // Status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta + // Metadata is the standard list metadata. + // +optional + metav1.ListMeta + + // Items is the list of horizontal pod autoscaler objects. 
+ Items []HorizontalPodAutoscaler +} diff --git a/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go new file mode 100644 index 00000000..c0bccf3b --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/autoscaling/zz_generated.deepcopy.go @@ -0,0 +1,481 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by deepcopy-gen. Do not edit it manually! + +package autoscaling + +import ( + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
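To make the shape of the internal types in the preceding types.go concrete, here is a hedged, illustrative sketch (not part of the vendored files in this patch) of an HPA that scales a Deployment on average CPU utilization; it follows the MetricSpec convention that `Type` plus exactly one matching source field is set. The object names ("web", "default") and the helper function are invented for the example.

```
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/autoscaling"
	api "k8s.io/kubernetes/pkg/apis/core"
)

// exampleHPA builds an internal HorizontalPodAutoscaler scaling a Deployment
// between 2 and 10 replicas on an 80% average CPU utilization target.
func exampleHPA() *autoscaling.HorizontalPodAutoscaler {
	minReplicas := int32(2)
	cpuTarget := int32(autoscaling.DefaultCPUUtilization)

	return &autoscaling.HorizontalPodAutoscaler{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: autoscaling.HorizontalPodAutoscalerSpec{
			ScaleTargetRef: autoscaling.CrossVersionObjectReference{
				Kind:       "Deployment",
				Name:       "web",
				APIVersion: "apps/v1",
			},
			MinReplicas: &minReplicas,
			MaxReplicas: 10,
			Metrics: []autoscaling.MetricSpec{{
				// Type names the metric source; only the matching
				// Resource field is populated alongside it.
				Type: autoscaling.ResourceMetricSourceType,
				Resource: &autoscaling.ResourceMetricSource{
					Name:                     api.ResourceName("cpu"),
					TargetAverageUtilization: &cpuTarget,
				},
			}},
		},
	}
}

func main() {
	hpa := exampleHPA()
	fmt.Println(hpa.Name, hpa.Spec.MaxReplicas)
}
```

The generated DeepCopy helpers in the zz_generated.deepcopy.go file that follows give such objects value semantics, so a controller can copy a cached HorizontalPodAutoscaler before mutating it.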
+func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + if *in == nil { + *out = nil + } else { + *out = new(int64) + **out = **in + } + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + if *in == nil { + *out = nil + } else { + *out = new(v1.Time) + (*in).DeepCopyInto(*out) + } + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + if *in == nil { + *out = nil + } else { + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + if *in == nil { + *out = nil + } else { + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + if *in == nil { + *out = nil + } else { + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.Target = in.Target + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + out.Target = in.Target + out.CurrentValue = in.CurrentValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. 
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + if *in == nil { + *out = nil + } else { + *out = new(resource.Quantity) + **out = (*in).DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + if *in == nil { + *out = nil + } else { + *out = new(int32) + **out = **in + } + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } else { + return nil + } +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. 
+func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go index 6a6827bb..131fdd99 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/annotation_key_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/annotation_key_constants.go @@ -16,7 +16,7 @@ limitations under the License. // This file should be consistent with pkg/api/v1/annotation_key_constants.go. -package api +package core const ( // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy @@ -68,6 +68,10 @@ const ( // This annotation can be attached to node. ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + // BootstrapCheckpointAnnotationKey represents a Resource (Pod) that should be checkpointed by + // the kubelet prior to running + BootstrapCheckpointAnnotationKey string = "node.kubernetes.io/bootstrap-checkpoint" + // annotation key prefix used to identify non-convertible json paths. NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" diff --git a/vendor/k8s.io/kubernetes/pkg/api/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go similarity index 91% rename from vendor/k8s.io/kubernetes/pkg/api/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/doc.go index 283a83e4..6017bfda 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/doc.go @@ -14,11 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // Package api contains the latest (or "internal") version of the // Kubernetes API objects. This is the API objects as represented in memory. // The contract presented to clients is located in the versioned packages, // which are sub-directories. The first one is "v1". Those packages // describe how a particular version is serialized to storage/network. -package api // import "k8s.io/kubernetes/pkg/api" +package core // import "k8s.io/kubernetes/pkg/apis/core" diff --git a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go similarity index 92% rename from vendor/k8s.io/kubernetes/pkg/api/field_constants.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go index 5ead0f13..a26f8056 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/field_constants.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/field_constants.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core // Field path constants that are specific to the internal API // representation. 
@@ -25,8 +25,8 @@ const ( PodStatusField = "status.phase" SecretTypeField = "type" - EventReasonField = "reason" - EventSourceField = "source" + EventReasonField = "action" + EventSourceField = "reportingComponent" EventTypeField = "type" EventInvolvedKindField = "involvedObject.kind" EventInvolvedNamespaceField = "involvedObject.namespace" diff --git a/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go similarity index 58% rename from vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go index f352f580..ca397c8e 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/helper/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/helper/helpers.go @@ -28,30 +28,36 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/core" ) // IsHugePageResourceName returns true if the resource name has the huge page // resource prefix. -func IsHugePageResourceName(name api.ResourceName) bool { - return strings.HasPrefix(string(name), api.ResourceHugePagesPrefix) +func IsHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) +} + +// IsQuotaHugePageResourceName returns true if the resource name has the quota +// related huge page resource prefix. +func IsQuotaHugePageResourceName(name core.ResourceName) bool { + return strings.HasPrefix(string(name), core.ResourceHugePagesPrefix) || strings.HasPrefix(string(name), core.ResourceRequestsHugePagesPrefix) } // HugePageResourceName returns a ResourceName with the canonical hugepage // prefix prepended for the specified page size. The page size is converted // to its canonical representation. -func HugePageResourceName(pageSize resource.Quantity) api.ResourceName { - return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceHugePagesPrefix, pageSize.String())) +func HugePageResourceName(pageSize resource.Quantity) core.ResourceName { + return core.ResourceName(fmt.Sprintf("%s%s", core.ResourceHugePagesPrefix, pageSize.String())) } // HugePageSizeFromResourceName returns the page size for the specified huge page // resource name. If the specified input is not a valid huge page resource name // an error is returned. -func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, error) { +func HugePageSizeFromResourceName(name core.ResourceName) (resource.Quantity, error) { if !IsHugePageResourceName(name) { return resource.Quantity{}, fmt.Errorf("resource name: %s is not valid hugepage name", name) } - pageSize := strings.TrimPrefix(string(name), api.ResourceHugePagesPrefix) + pageSize := strings.TrimPrefix(string(name), core.ResourceHugePagesPrefix) return resource.ParseQuantity(pageSize) } @@ -60,14 +66,14 @@ func HugePageSizeFromResourceName(name api.ResourceName) (resource.Quantity, err func NonConvertibleFields(annotations map[string]string) map[string]string { nonConvertibleKeys := map[string]string{} for key, value := range annotations { - if strings.HasPrefix(key, api.NonConvertibleAnnotationPrefix) { + if strings.HasPrefix(key, core.NonConvertibleAnnotationPrefix) { nonConvertibleKeys[key] = value } } return nonConvertibleKeys } -// Semantic can do semantic deep equality checks for api objects. +// Semantic can do semantic deep equality checks for core objects. 
// Example: apiequality.Semantic.DeepEqual(aPod, aPodWithNonNilButEmptyMaps) == true var Semantic = conversion.EqualitiesOrDie( func(a, b resource.Quantity) bool { @@ -92,10 +98,10 @@ var Semantic = conversion.EqualitiesOrDie( ) var standardResourceQuotaScopes = sets.NewString( - string(api.ResourceQuotaScopeTerminating), - string(api.ResourceQuotaScopeNotTerminating), - string(api.ResourceQuotaScopeBestEffort), - string(api.ResourceQuotaScopeNotBestEffort), + string(core.ResourceQuotaScopeTerminating), + string(core.ResourceQuotaScopeNotTerminating), + string(core.ResourceQuotaScopeBestEffort), + string(core.ResourceQuotaScopeNotBestEffort), ) // IsStandardResourceQuotaScope returns true if the scope is a standard value @@ -104,24 +110,24 @@ func IsStandardResourceQuotaScope(str string) bool { } var podObjectCountQuotaResources = sets.NewString( - string(api.ResourcePods), + string(core.ResourcePods), ) var podComputeQuotaResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), ) // IsResourceQuotaScopeValidForResource returns true if the resource applies to the specified scope -func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource string) bool { +func IsResourceQuotaScopeValidForResource(scope core.ResourceQuotaScope, resource string) bool { switch scope { - case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating, api.ResourceQuotaScopeNotBestEffort: + case core.ResourceQuotaScopeTerminating, core.ResourceQuotaScopeNotTerminating, core.ResourceQuotaScopeNotBestEffort: return podObjectCountQuotaResources.Has(resource) || podComputeQuotaResources.Has(resource) - case api.ResourceQuotaScopeBestEffort: + case core.ResourceQuotaScopeBestEffort: return podObjectCountQuotaResources.Has(resource) default: return true @@ -129,62 +135,45 @@ func IsResourceQuotaScopeValidForResource(scope api.ResourceQuotaScope, resource } var standardContainerResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), ) // IsStandardContainerResourceName returns true if the container can make a resource request // for the specified resource func IsStandardContainerResourceName(str string) bool { - return standardContainerResources.Has(str) || IsHugePageResourceName(api.ResourceName(str)) + return standardContainerResources.Has(str) || IsHugePageResourceName(core.ResourceName(str)) } // IsExtendedResourceName returns true if the resource name is not in the -// default namespace, or it has the opaque integer resource prefix. -func IsExtendedResourceName(name api.ResourceName) bool { - // TODO: Remove OIR part following deprecation. - return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name) +// default namespace. +func IsExtendedResourceName(name core.ResourceName) bool { + return !IsDefaultNamespaceResource(name) } // IsDefaultNamespaceResource returns true if the resource name is in the // *kubernetes.io/ namespace. Partially-qualified (unprefixed) names are // implicitly in the kubernetes.io/ namespace. 
-func IsDefaultNamespaceResource(name api.ResourceName) bool { +func IsDefaultNamespaceResource(name core.ResourceName) bool { return !strings.Contains(string(name), "/") || - strings.Contains(string(name), api.ResourceDefaultNamespacePrefix) + strings.Contains(string(name), core.ResourceDefaultNamespacePrefix) } -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name api.ResourceName) bool { - return strings.HasPrefix(string(name), api.ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. -func OpaqueIntResourceName(name string) api.ResourceName { - if IsOpaqueIntResourceName(api.ResourceName(name)) { - return api.ResourceName(name) - } - return api.ResourceName(fmt.Sprintf("%s%s", api.ResourceOpaqueIntPrefix, name)) -} - -var overcommitBlacklist = sets.NewString(string(api.ResourceNvidiaGPU)) +var overcommitBlacklist = sets.NewString(string(core.ResourceNvidiaGPU)) // IsOvercommitAllowed returns true if the resource is in the default // namespace and not blacklisted. -func IsOvercommitAllowed(name api.ResourceName) bool { +func IsOvercommitAllowed(name core.ResourceName) bool { return IsDefaultNamespaceResource(name) && !IsHugePageResourceName(name) && !overcommitBlacklist.Has(string(name)) } var standardLimitRangeTypes = sets.NewString( - string(api.LimitTypePod), - string(api.LimitTypeContainer), - string(api.LimitTypePersistentVolumeClaim), + string(core.LimitTypePod), + string(core.LimitTypeContainer), + string(core.LimitTypePersistentVolumeClaim), ) // IsStandardLimitRangeType returns true if the type is Pod or Container @@ -193,112 +182,112 @@ func IsStandardLimitRangeType(str string) bool { } var standardQuotaResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceRequestsStorage), - string(api.ResourceRequestsEphemeralStorage), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceLimitsEphemeralStorage), - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceConfigMaps), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsStorage), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceConfigMaps), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), ) // IsStandardQuotaResourceName returns true if the resource is known to // the quota tracking system func IsStandardQuotaResourceName(str string) bool { - return standardQuotaResources.Has(str) + return 
standardQuotaResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) } var standardResources = sets.NewString( - string(api.ResourceCPU), - string(api.ResourceMemory), - string(api.ResourceEphemeralStorage), - string(api.ResourceRequestsCPU), - string(api.ResourceRequestsMemory), - string(api.ResourceRequestsEphemeralStorage), - string(api.ResourceLimitsCPU), - string(api.ResourceLimitsMemory), - string(api.ResourceLimitsEphemeralStorage), - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourceConfigMaps), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceStorage), - string(api.ResourceRequestsStorage), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), + string(core.ResourceCPU), + string(core.ResourceMemory), + string(core.ResourceEphemeralStorage), + string(core.ResourceRequestsCPU), + string(core.ResourceRequestsMemory), + string(core.ResourceRequestsEphemeralStorage), + string(core.ResourceLimitsCPU), + string(core.ResourceLimitsMemory), + string(core.ResourceLimitsEphemeralStorage), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceStorage), + string(core.ResourceRequestsStorage), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), ) // IsStandardResourceName returns true if the resource is known to the system func IsStandardResourceName(str string) bool { - return standardResources.Has(str) || IsHugePageResourceName(api.ResourceName(str)) + return standardResources.Has(str) || IsQuotaHugePageResourceName(core.ResourceName(str)) } var integerResources = sets.NewString( - string(api.ResourcePods), - string(api.ResourceQuotas), - string(api.ResourceServices), - string(api.ResourceReplicationControllers), - string(api.ResourceSecrets), - string(api.ResourceConfigMaps), - string(api.ResourcePersistentVolumeClaims), - string(api.ResourceServicesNodePorts), - string(api.ResourceServicesLoadBalancers), + string(core.ResourcePods), + string(core.ResourceQuotas), + string(core.ResourceServices), + string(core.ResourceReplicationControllers), + string(core.ResourceSecrets), + string(core.ResourceConfigMaps), + string(core.ResourcePersistentVolumeClaims), + string(core.ResourceServicesNodePorts), + string(core.ResourceServicesLoadBalancers), ) // IsIntegerResourceName returns true if the resource is measured in integer values func IsIntegerResourceName(str string) bool { - return integerResources.Has(str) || IsExtendedResourceName(api.ResourceName(str)) + return integerResources.Has(str) || IsExtendedResourceName(core.ResourceName(str)) } // Extended and HugePages resources -func IsScalarResourceName(name api.ResourceName) bool { +func IsScalarResourceName(name core.ResourceName) bool { return IsExtendedResourceName(name) || IsHugePageResourceName(name) } // this function aims to check if the service's ClusterIP is set or not // the objective is not to perform validation here -func IsServiceIPSet(service *api.Service) bool { - return service.Spec.ClusterIP != api.ClusterIPNone && service.Spec.ClusterIP != "" +func IsServiceIPSet(service *core.Service) bool { + return service.Spec.ClusterIP != core.ClusterIPNone && service.Spec.ClusterIP 
!= "" } // this function aims to check if the service's cluster IP is requested or not -func IsServiceIPRequested(service *api.Service) bool { +func IsServiceIPRequested(service *core.Service) bool { // ExternalName services are CNAME aliases to external ones. Ignore the IP. - if service.Spec.Type == api.ServiceTypeExternalName { + if service.Spec.Type == core.ServiceTypeExternalName { return false } return service.Spec.ClusterIP == "" } var standardFinalizers = sets.NewString( - string(api.FinalizerKubernetes), + string(core.FinalizerKubernetes), metav1.FinalizerOrphanDependents, metav1.FinalizerDeleteDependents, ) // HasAnnotation returns a bool if passed in annotation exists -func HasAnnotation(obj api.ObjectMeta, ann string) bool { +func HasAnnotation(obj core.ObjectMeta, ann string) bool { _, found := obj.Annotations[ann] return found } // SetMetaDataAnnotation sets the annotation and value -func SetMetaDataAnnotation(obj *api.ObjectMeta, ann string, value string) { +func SetMetaDataAnnotation(obj *core.ObjectMeta, ann string, value string) { if obj.Annotations == nil { obj.Annotations = make(map[string]string) } @@ -311,7 +300,7 @@ func IsStandardFinalizerName(str string) bool { // AddToNodeAddresses appends the NodeAddresses to the passed-by-pointer slice, // only if they do not already exist -func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAddress) { +func AddToNodeAddresses(addresses *[]core.NodeAddress, addAddresses ...core.NodeAddress) { for _, add := range addAddresses { exists := false for _, existing := range *addresses { @@ -327,11 +316,11 @@ func AddToNodeAddresses(addresses *[]api.NodeAddress, addAddresses ...api.NodeAd } // TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusEqual(l, r *api.LoadBalancerStatus) bool { +func LoadBalancerStatusEqual(l, r *core.LoadBalancerStatus) bool { return ingressSliceEqual(l.Ingress, r.Ingress) } -func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { +func ingressSliceEqual(lhs, rhs []core.LoadBalancerIngress) bool { if len(lhs) != len(rhs) { return false } @@ -343,7 +332,7 @@ func ingressSliceEqual(lhs, rhs []api.LoadBalancerIngress) bool { return true } -func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool { +func ingressEqual(lhs, rhs *core.LoadBalancerIngress) bool { if lhs.IP != rhs.IP { return false } @@ -354,9 +343,9 @@ func ingressEqual(lhs, rhs *api.LoadBalancerIngress) bool { } // TODO: make method on LoadBalancerStatus? -func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerStatus { - c := &api.LoadBalancerStatus{} - c.Ingress = make([]api.LoadBalancerIngress, len(lb.Ingress)) +func LoadBalancerStatusDeepCopy(lb *core.LoadBalancerStatus) *core.LoadBalancerStatus { + c := &core.LoadBalancerStatus{} + c.Ingress = make([]core.LoadBalancerIngress, len(lb.Ingress)) for i := range lb.Ingress { c.Ingress[i] = lb.Ingress[i] } @@ -365,42 +354,42 @@ func LoadBalancerStatusDeepCopy(lb *api.LoadBalancerStatus) *api.LoadBalancerSta // GetAccessModesAsString returns a string representation of an array of access modes. // modes, when present, are always in the same order: RWO,ROX,RWX. 
-func GetAccessModesAsString(modes []api.PersistentVolumeAccessMode) string { +func GetAccessModesAsString(modes []core.PersistentVolumeAccessMode) string { modes = removeDuplicateAccessModes(modes) modesStr := []string{} - if containsAccessMode(modes, api.ReadWriteOnce) { + if containsAccessMode(modes, core.ReadWriteOnce) { modesStr = append(modesStr, "RWO") } - if containsAccessMode(modes, api.ReadOnlyMany) { + if containsAccessMode(modes, core.ReadOnlyMany) { modesStr = append(modesStr, "ROX") } - if containsAccessMode(modes, api.ReadWriteMany) { + if containsAccessMode(modes, core.ReadWriteMany) { modesStr = append(modesStr, "RWX") } return strings.Join(modesStr, ",") } // GetAccessModesAsString returns an array of AccessModes from a string created by GetAccessModesAsString -func GetAccessModesFromString(modes string) []api.PersistentVolumeAccessMode { +func GetAccessModesFromString(modes string) []core.PersistentVolumeAccessMode { strmodes := strings.Split(modes, ",") - accessModes := []api.PersistentVolumeAccessMode{} + accessModes := []core.PersistentVolumeAccessMode{} for _, s := range strmodes { s = strings.Trim(s, " ") switch { case s == "RWO": - accessModes = append(accessModes, api.ReadWriteOnce) + accessModes = append(accessModes, core.ReadWriteOnce) case s == "ROX": - accessModes = append(accessModes, api.ReadOnlyMany) + accessModes = append(accessModes, core.ReadOnlyMany) case s == "RWX": - accessModes = append(accessModes, api.ReadWriteMany) + accessModes = append(accessModes, core.ReadWriteMany) } } return accessModes } // removeDuplicateAccessModes returns an array of access modes without any duplicates -func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.PersistentVolumeAccessMode { - accessModes := []api.PersistentVolumeAccessMode{} +func removeDuplicateAccessModes(modes []core.PersistentVolumeAccessMode) []core.PersistentVolumeAccessMode { + accessModes := []core.PersistentVolumeAccessMode{} for _, m := range modes { if !containsAccessMode(accessModes, m) { accessModes = append(accessModes, m) @@ -409,7 +398,7 @@ func removeDuplicateAccessModes(modes []api.PersistentVolumeAccessMode) []api.Pe return accessModes } -func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.PersistentVolumeAccessMode) bool { +func containsAccessMode(modes []core.PersistentVolumeAccessMode, mode core.PersistentVolumeAccessMode) bool { for _, m := range modes { if m == mode { return true @@ -418,9 +407,9 @@ func containsAccessMode(modes []api.PersistentVolumeAccessMode, mode api.Persist return false } -// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement api type into a struct that implements +// NodeSelectorRequirementsAsSelector converts the []NodeSelectorRequirement core type into a struct that implements // labels.Selector. 
-func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labels.Selector, error) { +func NodeSelectorRequirementsAsSelector(nsm []core.NodeSelectorRequirement) (labels.Selector, error) { if len(nsm) == 0 { return labels.Nothing(), nil } @@ -428,17 +417,17 @@ func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labe for _, expr := range nsm { var op selection.Operator switch expr.Operator { - case api.NodeSelectorOpIn: + case core.NodeSelectorOpIn: op = selection.In - case api.NodeSelectorOpNotIn: + case core.NodeSelectorOpNotIn: op = selection.NotIn - case api.NodeSelectorOpExists: + case core.NodeSelectorOpExists: op = selection.Exists - case api.NodeSelectorOpDoesNotExist: + case core.NodeSelectorOpDoesNotExist: op = selection.DoesNotExist - case api.NodeSelectorOpGt: + case core.NodeSelectorOpGt: op = selection.GreaterThan - case api.NodeSelectorOpLt: + case core.NodeSelectorOpLt: op = selection.LessThan default: return nil, fmt.Errorf("%q is not a valid node selector operator", expr.Operator) @@ -453,11 +442,11 @@ func NodeSelectorRequirementsAsSelector(nsm []api.NodeSelectorRequirement) (labe } // GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations -// and converts it to the []Toleration type in api. -func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Toleration, error) { - var tolerations []api.Toleration - if len(annotations) > 0 && annotations[api.TolerationsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[api.TolerationsAnnotationKey]), &tolerations) +// and converts it to the []Toleration type in core. +func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]core.Toleration, error) { + var tolerations []core.Toleration + if len(annotations) > 0 && annotations[core.TolerationsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TolerationsAnnotationKey]), &tolerations) if err != nil { return tolerations, err } @@ -467,10 +456,10 @@ func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]api.Tole // AddOrUpdateTolerationInPod tries to add a toleration to the pod's toleration list. // Returns true if something was updated, false otherwise. -func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool { +func AddOrUpdateTolerationInPod(pod *core.Pod, toleration *core.Toleration) bool { podTolerations := pod.Spec.Tolerations - var newTolerations []api.Toleration + var newTolerations []core.Toleration updated := false for i := range podTolerations { if toleration.MatchToleration(&podTolerations[i]) { @@ -494,7 +483,7 @@ func AddOrUpdateTolerationInPod(pod *api.Pod, toleration *api.Toleration) bool { } // TolerationToleratesTaint checks if the toleration tolerates the taint. 
-func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool { +func TolerationToleratesTaint(toleration *core.Toleration, taint *core.Taint) bool { if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect { return false } @@ -503,17 +492,17 @@ func TolerationToleratesTaint(toleration *api.Toleration, taint *api.Taint) bool return false } // TODO: Use proper defaulting when Toleration becomes a field of PodSpec - if (len(toleration.Operator) == 0 || toleration.Operator == api.TolerationOpEqual) && toleration.Value == taint.Value { + if (len(toleration.Operator) == 0 || toleration.Operator == core.TolerationOpEqual) && toleration.Value == taint.Value { return true } - if toleration.Operator == api.TolerationOpExists { + if toleration.Operator == core.TolerationOpExists { return true } return false } // TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations. -func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration) bool { +func TaintToleratedByTolerations(taint *core.Taint, tolerations []core.Toleration) bool { tolerated := false for i := range tolerations { if TolerationToleratesTaint(&tolerations[i], taint) { @@ -525,13 +514,13 @@ func TaintToleratedByTolerations(taint *api.Taint, tolerations []api.Toleration) } // GetTaintsFromNodeAnnotations gets the json serialized taints data from Pod.Annotations -// and converts it to the []Taint type in api. -func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, error) { - var taints []api.Taint - if len(annotations) > 0 && annotations[api.TaintsAnnotationKey] != "" { - err := json.Unmarshal([]byte(annotations[api.TaintsAnnotationKey]), &taints) +// and converts it to the []Taint type in core. +func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]core.Taint, error) { + var taints []core.Taint + if len(annotations) > 0 && annotations[core.TaintsAnnotationKey] != "" { + err := json.Unmarshal([]byte(annotations[core.TaintsAnnotationKey]), &taints) if err != nil { - return []api.Taint{}, err + return []core.Taint{}, err } } return taints, nil @@ -540,12 +529,12 @@ func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]api.Taint, e // SysctlsFromPodAnnotations parses the sysctl annotations into a slice of safe Sysctls // and a slice of unsafe Sysctls. This is only a convenience wrapper around // SysctlsFromPodAnnotation. -func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl, error) { - safe, err := SysctlsFromPodAnnotation(a[api.SysctlsPodAnnotationKey]) +func SysctlsFromPodAnnotations(a map[string]string) ([]core.Sysctl, []core.Sysctl, error) { + safe, err := SysctlsFromPodAnnotation(a[core.SysctlsPodAnnotationKey]) if err != nil { return nil, nil, err } - unsafe, err := SysctlsFromPodAnnotation(a[api.UnsafeSysctlsPodAnnotationKey]) + unsafe, err := SysctlsFromPodAnnotation(a[core.UnsafeSysctlsPodAnnotationKey]) if err != nil { return nil, nil, err } @@ -554,13 +543,13 @@ func SysctlsFromPodAnnotations(a map[string]string) ([]api.Sysctl, []api.Sysctl, } // SysctlsFromPodAnnotation parses an annotation value into a slice of Sysctls. 
-func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) { +func SysctlsFromPodAnnotation(annotation string) ([]core.Sysctl, error) { if len(annotation) == 0 { return nil, nil } kvs := strings.Split(annotation, ",") - sysctls := make([]api.Sysctl, len(kvs)) + sysctls := make([]core.Sysctl, len(kvs)) for i, kv := range kvs { cs := strings.Split(kv, "=") if len(cs) != 2 || len(cs[0]) == 0 { @@ -573,7 +562,7 @@ func SysctlsFromPodAnnotation(annotation string) ([]api.Sysctl, error) { } // PodAnnotationsFromSysctls creates an annotation value for a slice of Sysctls. -func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string { +func PodAnnotationsFromSysctls(sysctls []core.Sysctl) string { if len(sysctls) == 0 { return "" } @@ -586,9 +575,9 @@ func PodAnnotationsFromSysctls(sysctls []api.Sysctl) string { } // GetPersistentVolumeClass returns StorageClassName. -func GetPersistentVolumeClass(volume *api.PersistentVolume) string { +func GetPersistentVolumeClass(volume *core.PersistentVolume) string { // Use beta annotation first - if class, found := volume.Annotations[api.BetaStorageClassAnnotation]; found { + if class, found := volume.Annotations[core.BetaStorageClassAnnotation]; found { return class } @@ -597,9 +586,9 @@ func GetPersistentVolumeClass(volume *api.PersistentVolume) string { // GetPersistentVolumeClaimClass returns StorageClassName. If no storage class was // requested, it returns "". -func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string { +func GetPersistentVolumeClaimClass(claim *core.PersistentVolumeClaim) string { // Use beta annotation first - if class, found := claim.Annotations[api.BetaStorageClassAnnotation]; found { + if class, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { return class } @@ -611,9 +600,9 @@ func GetPersistentVolumeClaimClass(claim *api.PersistentVolumeClaim) string { } // PersistentVolumeClaimHasClass returns true if given claim has set StorageClassName field. -func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool { +func PersistentVolumeClaimHasClass(claim *core.PersistentVolumeClaim) bool { // Use beta annotation first - if _, found := claim.Annotations[api.BetaStorageClassAnnotation]; found { + if _, found := claim.Annotations[core.BetaStorageClassAnnotation]; found { return true } @@ -625,12 +614,12 @@ func PersistentVolumeClaimHasClass(claim *api.PersistentVolumeClaim) bool { } // GetStorageNodeAffinityFromAnnotation gets the json serialized data from PersistentVolume.Annotations -// and converts it to the NodeAffinity type in api. +// and converts it to the NodeAffinity type in core. 
// TODO: update when storage node affinity graduates to beta -func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.NodeAffinity, error) { - if len(annotations) > 0 && annotations[api.AlphaStorageNodeAffinityAnnotation] != "" { - var affinity api.NodeAffinity - err := json.Unmarshal([]byte(annotations[api.AlphaStorageNodeAffinityAnnotation]), &affinity) +func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*core.NodeAffinity, error) { + if len(annotations) > 0 && annotations[core.AlphaStorageNodeAffinityAnnotation] != "" { + var affinity core.NodeAffinity + err := json.Unmarshal([]byte(annotations[core.AlphaStorageNodeAffinityAnnotation]), &affinity) if err != nil { return nil, err } @@ -641,7 +630,7 @@ func GetStorageNodeAffinityFromAnnotation(annotations map[string]string) (*api.N // Converts NodeAffinity type to Alpha annotation for use in PersistentVolumes // TODO: update when storage node affinity graduates to beta -func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *api.NodeAffinity) error { +func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinity *core.NodeAffinity) error { if affinity == nil { return nil } @@ -650,6 +639,6 @@ func StorageNodeAffinityToAlphaAnnotation(annotations map[string]string, affinit if err != nil { return err } - annotations[api.AlphaStorageNodeAffinityAnnotation] = string(json) + annotations[core.AlphaStorageNodeAffinityAnnotation] = string(json) return nil } diff --git a/vendor/k8s.io/kubernetes/pkg/api/install/install.go b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go similarity index 89% rename from vendor/k8s.io/kubernetes/pkg/api/install/install.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go index 5fbffe23..cae514ec 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/install/install.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/install/install.go @@ -23,9 +23,9 @@ import ( "k8s.io/apimachinery/pkg/apimachinery/registered" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/v1" ) func init() { @@ -36,9 +36,9 @@ func init() { func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) { if err := announced.NewGroupMetaFactory( &announced.GroupMetaFactoryArgs{ - GroupName: api.GroupName, + GroupName: core.GroupName, VersionPreferenceOrder: []string{v1.SchemeGroupVersion.Version}, - AddInternalObjectsToScheme: api.AddToScheme, + AddInternalObjectsToScheme: core.AddToScheme, RootScopedKinds: sets.NewString( "Node", "Namespace", @@ -56,9 +56,6 @@ func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *r "PodProxyOptions", "NodeProxyOptions", "ServiceProxyOptions", - "ThirdPartyResource", - "ThirdPartyResourceData", - "ThirdPartyResourceList", ), }, announced.VersionToSchemeFunc{ diff --git a/vendor/k8s.io/kubernetes/pkg/api/json.go b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/json.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/json.go index 3a6e04c1..937cd056 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/json.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/json.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations 
under the License. */ -package api +package core import "encoding/json" diff --git a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/objectreference.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go index b36ecab2..55b27f30 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/objectreference.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/objectreference.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core import ( "k8s.io/apimachinery/pkg/runtime/schema" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go new file mode 100644 index 00000000..cf199cee --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/pods/helpers.go @@ -0,0 +1,63 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package pods + +import ( + "fmt" + + "k8s.io/kubernetes/pkg/fieldpath" +) + +// ConvertDownwardAPIFieldLabel converts the specified downward API field label +// and its value in the pod of the specified version to the internal version, +// and returns the converted label and value. This function returns an error if +// the conversion fails. +func ConvertDownwardAPIFieldLabel(version, label, value string) (string, string, error) { + if version != "v1" { + return "", "", fmt.Errorf("unsupported pod version: %s", version) + } + + if path, _, ok := fieldpath.SplitMaybeSubscriptedPath(label); ok { + switch path { + case "metadata.annotations", "metadata.labels": + return label, value, nil + default: + return "", "", fmt.Errorf("field label does not support subscript: %s", label) + } + } + + switch label { + case "metadata.annotations", + "metadata.labels", + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.restartPolicy", + "spec.serviceAccountName", + "spec.schedulerName", + "status.phase", + "status.hostIP", + "status.podIP": + return label, value, nil + // This is for backwards compatibility with old v1 clients which send spec.host + case "spec.host": + return "spec.nodeName", value, nil + default: + return "", "", fmt.Errorf("field label not supported: %s", label) + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/register.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/register.go index 8029a957..2784cbe1 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/register.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package api +package core import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/api/resource.go b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go similarity index 99% rename from vendor/k8s.io/kubernetes/pkg/api/resource.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/resource.go index ce1747d8..1910cd92 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/resource.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/resource.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core import ( "k8s.io/apimachinery/pkg/api/resource" diff --git a/vendor/k8s.io/kubernetes/pkg/api/taint.go b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/taint.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/taint.go index 173e4e60..ae1feb74 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/taint.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/taint.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core import "fmt" diff --git a/vendor/k8s.io/kubernetes/pkg/api/toleration.go b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go similarity index 98% rename from vendor/k8s.io/kubernetes/pkg/api/toleration.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go index edb73b74..1dfbc9f1 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/toleration.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/toleration.go @@ -17,7 +17,7 @@ limitations under the License. //TODO: consider making these methods functions, because we don't want helper //functions in the k8s.io/api repo. -package api +package core // MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by , // if the two tolerations have same combination, regard as they match. diff --git a/vendor/k8s.io/kubernetes/pkg/api/types.go b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go similarity index 95% rename from vendor/k8s.io/kubernetes/pkg/api/types.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/types.go index f6d7aa89..032e6d84 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package api +package core import ( "k8s.io/apimachinery/pkg/api/resource" @@ -347,10 +347,10 @@ type PersistentVolumeSource struct { // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime // +optional Quobyte *QuobyteVolumeSource - // ISCSIVolumeSource represents an ISCSI resource that is attached to a + // ISCSIPersistentVolumeSource represents an ISCSI resource that is attached to a // kubelet's host machine and then exposed to the pod. // +optional - ISCSI *ISCSIVolumeSource + ISCSI *ISCSIPersistentVolumeSource // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. This is an alpha feature and may change in future. 
// +optional @@ -391,6 +391,9 @@ type PersistentVolumeSource struct { // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource + // CSI (Container Storage Interface) represents storage that handled by an external CSI driver + // +optional + CSI *CSIPersistentVolumeSource } type PersistentVolumeClaimVolumeSource struct { @@ -404,7 +407,7 @@ type PersistentVolumeClaimVolumeSource struct { const ( // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. - // It's currently still used and will be held for backwards compatibility + // It's deprecated and will be removed in a future release. (#51440) BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" // MountOptionAnnotation defines mount option annotation used in PVs @@ -459,6 +462,11 @@ type PersistentVolumeSpec struct { // simply fail if one is invalid. // +optional MountOptions []string + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode } // PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes @@ -476,6 +484,16 @@ const ( PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" ) +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + type PersistentVolumeStatus struct { // Phase indicates if a volume is available, bound to a claim, or released by a claim // +optional @@ -545,6 +563,11 @@ type PersistentVolumeClaimSpec struct { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#class-1 // +optional StorageClassName *string + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. + // +optional + VolumeMode *PersistentVolumeMode } type PersistentVolumeClaimConditionType string @@ -770,6 +793,54 @@ type ISCSIVolumeSource struct { InitiatorName *string } +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIPersistentVolumeSource struct { + // Required: iSCSI target portal + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + TargetPortal string + // Required: target iSCSI Qualified Name + // +optional + IQN string + // Required: iSCSI target lun number + // +optional + Lun int32 + // Optional: Defaults to 'default' (tcp). iSCSI interface name that uses an iSCSI transport. + // +optional + ISCSIInterface string + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool + // Optional: list of iSCSI target portal ips for high availability. + // the portal is either an IP or ip_addr:port if port is other than default (typically TCP ports 860 and 3260) + // +optional + Portals []string + // Optional: whether support iSCSI Discovery CHAP authentication + // +optional + DiscoveryCHAPAuth bool + // Optional: whether support iSCSI Session CHAP authentication + // +optional + SessionCHAPAuth bool + // Optional: CHAP secret for iSCSI target and initiator authentication. + // The secret is used if either DiscoveryCHAPAuth or SessionCHAPAuth is true + // +optional + SecretRef *SecretReference + // Optional: Custom initiator name per volume. + // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface + // : will be created for the connection. + // +optional + InitiatorName *string +} + // Represents a Fibre Channel volume. // Fibre Channel volumes can only be mounted as read/write once. // Fibre Channel volumes support ownership management and SELinux relabeling. @@ -1503,6 +1574,23 @@ type LocalVolumeSource struct { Path string } +// Represents storage that is managed by an external CSI volume driver +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool +} + // ContainerPort represents a network port in a single container type ContainerPort struct { // Optional: If specified, this must be an IANA_SVC_NAME Each named port @@ -1530,7 +1618,9 @@ type VolumeMount struct { // Optional: Defaults to false (read-write). // +optional ReadOnly bool - // Required. Must not contain ':'. + // Required. If the path is not an absolute path (e.g. some/path) it + // will be prepended with the appropriate root prefix for the operating + // system. On Linux this is '/', on Windows this is 'C:\'. MountPath string // Path within the volume from which the container's volume should be mounted. // Defaults to "" (volume's root). @@ -1564,6 +1654,14 @@ const ( MountPropagationBidirectional MountPropagationMode = "Bidirectional" ) +// VolumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string +} + // EnvVar represents an environment variable present in a Container. type EnvVar struct { // Required: This must be a C_IDENTIFIER. @@ -1857,6 +1955,10 @@ type Container struct { Resources ResourceRequirements // +optional VolumeMounts []VolumeMount + // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. 
+ // +optional + VolumeDevices []VolumeDevice // +optional LivenessProbe *Probe // +optional @@ -2081,6 +2183,11 @@ const ( // DNSDefault indicates that the pod should use the default (as // determined by kubelet) DNS settings. DNSDefault DNSPolicy = "Default" + + // DNSNone indicates that the pod should use empty DNS settings. DNS + // parameters such as nameservers and search paths should be defined via + // DNSConfig. + DNSNone DNSPolicy = "None" ) // A node selector represents the union of the results of one or more label queries @@ -2380,7 +2487,12 @@ type PodSpec struct { // before the system actively tries to terminate the pod; value must be positive integer // +optional ActiveDeadlineSeconds *int64 - // Required: Set DNS policy. + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. // +optional DNSPolicy DNSPolicy // NodeSelector is a selector which must be true for the pod to fit on a node @@ -2444,6 +2556,11 @@ type PodSpec struct { // The higher the value, the higher the priority. // +optional Priority *int32 + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *PodDNSConfig } // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the @@ -2533,6 +2650,35 @@ const ( PodQOSBestEffort PodQOSClass = "BestEffort" ) +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string + // +optional + Value *string +} + // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system. 
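A short sketch of how the DNSNone policy and PodDNSConfig introduced above combine on a pod spec, assuming the public v1 mirror types (Convert_v1_PodDNSConfig_To_core_PodDNSConfig appears later in this diff); the nameserver, search domains, and resolver option are placeholder values.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	ndots := "2"
	spec := v1.PodSpec{
		// With DNSPolicy "None" the pod's resolv.conf is built entirely from
		// DNSConfig; with the other policies these values are merged into the
		// policy-generated configuration.
		DNSPolicy: v1.DNSNone,
		DNSConfig: &v1.PodDNSConfig{
			Nameservers: []string{"1.2.3.4"},
			Searches:    []string{"ns1.svc.cluster.local", "my.dns.search.suffix"},
			Options: []v1.PodDNSConfigOption{
				{Name: "ndots", Value: &ndots},
			},
		},
	}
	fmt.Println(spec.DNSPolicy, spec.DNSConfig.Nameservers)
}
```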
type PodStatus struct { @@ -2725,8 +2871,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/extensions.Scale,result=k8s.io/kubernetes/pkg/apis/extensions.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -2916,7 +3062,8 @@ type ServiceSpec struct { // ExternalName is the external reference that kubedns or equivalent will // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid DNS name and requires Type to be ExternalName. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. ExternalName string // ExternalIPs are used by external load balancers, or can be set by @@ -3406,8 +3553,6 @@ const ( ) const ( - // Namespace prefix for opaque counted resources (alpha). - ResourceOpaqueIntPrefix = "pod.alpha.kubernetes.io/opaque-int-resource-" // Default namespace prefix. ResourceDefaultNamespacePrefix = "kubernetes.io/" // Name prefix for huge page resources (alpha). @@ -3575,6 +3720,10 @@ type DeleteOptions struct { // Either this field or OrphanDependents may be set, but not both. // The default policy is decided by the existing finalizer set in the // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. // +optional PropagationPolicy *DeletionPropagation } @@ -3809,7 +3958,7 @@ type Event struct { // +optional metav1.ObjectMeta - // Required. The object that this event is about. + // Required. The object that this event is about. Mapped to events.Event.regarding // +optional InvolvedObject ObjectReference @@ -3821,7 +3970,7 @@ type Event struct { Reason string // Optional. A human-readable description of the status of this operation. - // TODO: decide on maximum length. + // TODO: decide on maximum length. Mapped to events.Event.note // +optional Message string @@ -3844,8 +3993,49 @@ type Event struct { // Type of this event (Normal, Warning), new types could be added in the future. // +optional Type string + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + Series *EventSeries + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string + + // ID of the controller instance, e.g. `kubelet-xyzf`. 
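A hedged sketch of an Event carrying the new fields added above (EventTime, Action, ReportingController, ReportingInstance), assuming the public v1 Event mirrors these internal additions as the generated conversions later in this diff suggest; the object names and controller identity are invented for illustration.

```go
package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	ev := v1.Event{
		ObjectMeta: metav1.ObjectMeta{Name: "mypod.153d7f2c5a9e1b00", Namespace: "default"},
		InvolvedObject: v1.ObjectReference{
			Kind:      "Pod",
			Namespace: "default",
			Name:      "mypod",
		},
		Reason:  "Scheduled",
		Message: "Successfully assigned default/mypod to node-1",
		Type:    "Normal",
		// New fields: microsecond-precision timestamp, the action taken, and
		// the identity of the controller instance reporting the event.
		EventTime:           metav1.NewMicroTime(time.Now()),
		Action:              "Binding",
		ReportingController: "kubernetes.io/kube-scheduler",
		ReportingInstance:   "kube-scheduler-node-1",
	}
	fmt.Println(ev.Reason, ev.EventTime.Format(time.RFC3339Nano))
}
```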
+ // +optional + ReportingInstance string } +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 + // Time of the last occurence observed + LastObservedTime metav1.MicroTime + // State of this Series: Ongoing or Finished + State EventSeriesState +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // EventList is a list of events. @@ -3964,6 +4154,13 @@ const ( ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" ) +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" +) + // A ResourceQuotaScope defines a filter that must match each object tracked by a quota type ResourceQuotaScope string @@ -4350,5 +4547,5 @@ const ( // corresponding to every RequiredDuringScheduling affinity rule. // When the --hard-pod-affinity-weight scheduler flag is not specified, // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int = 1 + DefaultHardPodAffinitySymmetricWeight int32 = 1 ) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go similarity index 72% rename from vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go index c5b962a7..8f6e09b6 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/conversion.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/conversion.go @@ -26,7 +26,7 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/apis/extensions" ) @@ -38,79 +38,79 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error { switch a := objA.(type) { case *v1.Pod: switch b := objB.(type) { - case *api.Pod: - return true, Convert_v1_Pod_To_api_Pod(a, b, s) + case *core.Pod: + return true, Convert_v1_Pod_To_core_Pod(a, b, s) } - case *api.Pod: + case *core.Pod: switch b := objB.(type) { case *v1.Pod: - return true, Convert_api_Pod_To_v1_Pod(a, b, s) + return true, Convert_core_Pod_To_v1_Pod(a, b, s) } case *v1.Event: switch b := objB.(type) { - case *api.Event: - return true, Convert_v1_Event_To_api_Event(a, b, s) + case *core.Event: + return true, Convert_v1_Event_To_core_Event(a, b, s) } - case *api.Event: + case *core.Event: switch b := objB.(type) { case *v1.Event: - return true, Convert_api_Event_To_v1_Event(a, b, s) + return true, Convert_core_Event_To_v1_Event(a, b, s) } case *v1.ReplicationController: switch b := objB.(type) { - case *api.ReplicationController: - return true, Convert_v1_ReplicationController_To_api_ReplicationController(a, b, s) + case *core.ReplicationController: + return true, Convert_v1_ReplicationController_To_core_ReplicationController(a, b, s) } - case *api.ReplicationController: + case *core.ReplicationController: switch b := objB.(type) { case *v1.ReplicationController: - return true, 
Convert_api_ReplicationController_To_v1_ReplicationController(a, b, s) + return true, Convert_core_ReplicationController_To_v1_ReplicationController(a, b, s) } case *v1.Node: switch b := objB.(type) { - case *api.Node: - return true, Convert_v1_Node_To_api_Node(a, b, s) + case *core.Node: + return true, Convert_v1_Node_To_core_Node(a, b, s) } - case *api.Node: + case *core.Node: switch b := objB.(type) { case *v1.Node: - return true, Convert_api_Node_To_v1_Node(a, b, s) + return true, Convert_core_Node_To_v1_Node(a, b, s) } case *v1.Namespace: switch b := objB.(type) { - case *api.Namespace: - return true, Convert_v1_Namespace_To_api_Namespace(a, b, s) + case *core.Namespace: + return true, Convert_v1_Namespace_To_core_Namespace(a, b, s) } - case *api.Namespace: + case *core.Namespace: switch b := objB.(type) { case *v1.Namespace: - return true, Convert_api_Namespace_To_v1_Namespace(a, b, s) + return true, Convert_core_Namespace_To_v1_Namespace(a, b, s) } case *v1.Service: switch b := objB.(type) { - case *api.Service: - return true, Convert_v1_Service_To_api_Service(a, b, s) + case *core.Service: + return true, Convert_v1_Service_To_core_Service(a, b, s) } - case *api.Service: + case *core.Service: switch b := objB.(type) { case *v1.Service: - return true, Convert_api_Service_To_v1_Service(a, b, s) + return true, Convert_core_Service_To_v1_Service(a, b, s) } case *v1.Endpoints: switch b := objB.(type) { - case *api.Endpoints: - return true, Convert_v1_Endpoints_To_api_Endpoints(a, b, s) + case *core.Endpoints: + return true, Convert_v1_Endpoints_To_core_Endpoints(a, b, s) } - case *api.Endpoints: + case *core.Endpoints: switch b := objB.(type) { case *v1.Endpoints: - return true, Convert_api_Endpoints_To_v1_Endpoints(a, b, s) + return true, Convert_core_Endpoints_To_v1_Endpoints(a, b, s) } case *metav1.WatchEvent: @@ -132,16 +132,16 @@ func addFastPathConversionFuncs(scheme *runtime.Scheme) error { func addConversionFuncs(scheme *runtime.Scheme) error { // Add non-generated conversion functions err := scheme.AddConversionFuncs( - Convert_api_Pod_To_v1_Pod, - Convert_api_PodSpec_To_v1_PodSpec, - Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, - Convert_api_ServiceSpec_To_v1_ServiceSpec, - Convert_v1_Pod_To_api_Pod, - Convert_v1_PodSpec_To_api_PodSpec, - Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec, - Convert_v1_Secret_To_api_Secret, - Convert_v1_ServiceSpec_To_api_ServiceSpec, - Convert_v1_ResourceList_To_api_ResourceList, + Convert_core_Pod_To_v1_Pod, + Convert_core_PodSpec_To_v1_PodSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_Pod_To_core_Pod, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_v1_Secret_To_core_Secret, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_v1_ResourceList_To_core_ResourceList, Convert_v1_ReplicationController_to_extensions_ReplicaSet, Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec, Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus, @@ -157,20 +157,15 @@ func addConversionFuncs(scheme *runtime.Scheme) error { err = scheme.AddFieldLabelConversionFunc("v1", "Pod", func(label, value string) (string, string, error) { switch label { - case "metadata.annotations", - "metadata.labels", - "metadata.name", + case "metadata.name", "metadata.namespace", - "metadata.uid", "spec.nodeName", "spec.restartPolicy", - 
"spec.serviceAccountName", "spec.schedulerName", "status.phase", - "status.hostIP", "status.podIP": return label, value, nil - // This is for backwards compatibility with old v1 clients which send spec.host + // This is for backwards compatibility with old v1 clients which send spec.host case "spec.host": return "spec.nodeName", value, nil default: @@ -241,7 +236,7 @@ func Convert_v1_ReplicationControllerSpec_to_extensions_ReplicaSetSpec(in *v1.Re metav1.Convert_map_to_unversioned_LabelSelector(&in.Selector, out.Selector, s) } if in.Template != nil { - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, &out.Template, s); err != nil { + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, &out.Template, s); err != nil { return err } } @@ -257,7 +252,7 @@ func Convert_v1_ReplicationControllerStatus_to_extensions_ReplicaSetStatus(in *v for _, cond := range in.Conditions { out.Conditions = append(out.Conditions, extensions.ReplicaSetCondition{ Type: extensions.ReplicaSetConditionType(cond.Type), - Status: api.ConditionStatus(cond.Status), + Status: core.ConditionStatus(cond.Status), LastTransitionTime: cond.LastTransitionTime, Reason: cond.Reason, Message: cond.Message, @@ -293,7 +288,7 @@ func Convert_extensions_ReplicaSetSpec_to_v1_ReplicationControllerSpec(in *exten invalidErr = metav1.Convert_unversioned_LabelSelector_to_map(in.Selector, &out.Selector, s) } out.Template = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, out.Template, s); err != nil { return err } return invalidErr @@ -317,13 +312,13 @@ func Convert_extensions_ReplicaSetStatus_to_v1_ReplicationControllerStatus(in *e return nil } -func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *api.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { +func Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { out.Replicas = &in.Replicas out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector if in.Template != nil { out.Template = new(v1.PodTemplateSpec) - if err := Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in.Template, out.Template, s); err != nil { return err } } else { @@ -332,15 +327,15 @@ func Convert_api_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *a return nil } -func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *api.ReplicationControllerSpec, s conversion.Scope) error { +func Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { if in.Replicas != nil { out.Replicas = *in.Replicas } out.MinReadySeconds = in.MinReadySeconds out.Selector = in.Selector if in.Template != nil { - out.Template = new(api.PodTemplateSpec) - if err := Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in.Template, out.Template, s); err != nil { + out.Template = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in.Template, out.Template, s); err != nil { return err } } else { @@ -349,16 +344,16 @@ func 
Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *v return nil } -func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { +func Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil { return err } return nil } -func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil { +func Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in, out, s); err != nil { return err } @@ -367,8 +362,8 @@ func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *v1.PodTemplateSpec, o // The following two v1.PodSpec conversions are done here to support v1.ServiceAccount // as an alias for ServiceAccountName. -func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conversion.Scope) error { - if err := autoConvert_api_PodSpec_To_v1_PodSpec(in, out, s); err != nil { +func Convert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if err := autoConvert_core_PodSpec_To_v1_PodSpec(in, out, s); err != nil { return err } @@ -386,8 +381,8 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *v1.PodSpec, s conve return nil } -func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conversion.Scope) error { - if err := autoConvert_v1_PodSpec_To_api_PodSpec(in, out, s); err != nil { +func Convert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if err := autoConvert_v1_PodSpec_To_core_PodSpec(in, out, s); err != nil { return err } @@ -400,7 +395,7 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve // the host namespace fields have to be handled specially for backward compatibility // with v1.0.0 if out.SecurityContext == nil { - out.SecurityContext = new(api.PodSecurityContext) + out.SecurityContext = new(core.PodSecurityContext) } out.SecurityContext.HostNetwork = in.HostNetwork out.SecurityContext.HostPID = in.HostPID @@ -409,8 +404,8 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *v1.PodSpec, out *api.PodSpec, s conve return nil } -func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) error { - if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil { +func Convert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + if err := autoConvert_core_Pod_To_v1_Pod(in, out, s); err != nil { return err } @@ -431,8 +426,8 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *v1.Pod, s conversion.Scope) err return nil } -func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out *api.Secret, s conversion.Scope) error { - if err := autoConvert_v1_Secret_To_api_Secret(in, out, s); err != nil { +func Convert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + if err := autoConvert_v1_Secret_To_core_Secret(in, out, s); err != nil { return err } @@ -448,10 +443,10 @@ func Convert_v1_Secret_To_api_Secret(in *v1.Secret, out 
*api.Secret, s conversio return nil } -func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { +func Convert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { if in.Capabilities != nil { out.Capabilities = new(v1.Capabilities) - if err := Convert_api_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { + if err := Convert_core_Capabilities_To_v1_Capabilities(in.Capabilities, out.Capabilities, s); err != nil { return err } } else { @@ -460,7 +455,7 @@ func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, out.Privileged = in.Privileged if in.SELinuxOptions != nil { out.SELinuxOptions = new(v1.SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { @@ -473,11 +468,11 @@ func Convert_api_SecurityContext_To_v1_SecurityContext(in *api.SecurityContext, return nil } -func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { +func Convert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { out.SupplementalGroups = in.SupplementalGroups if in.SELinuxOptions != nil { out.SELinuxOptions = new(v1.SELinuxOptions) - if err := Convert_api_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + if err := Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { @@ -489,11 +484,11 @@ func Convert_api_PodSecurityContext_To_v1_PodSecurityContext(in *api.PodSecurity return nil } -func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityContext, out *api.PodSecurityContext, s conversion.Scope) error { +func Convert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { out.SupplementalGroups = in.SupplementalGroups if in.SELinuxOptions != nil { - out.SELinuxOptions = new(api.SELinuxOptions) - if err := Convert_v1_SELinuxOptions_To_api_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { + out.SELinuxOptions = new(core.SELinuxOptions) + if err := Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in.SELinuxOptions, out.SELinuxOptions, s); err != nil { return err } } else { @@ -506,12 +501,12 @@ func Convert_v1_PodSecurityContext_To_api_PodSecurityContext(in *v1.PodSecurityC } // +k8s:conversion-fn=copy-only -func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.ResourceList, s conversion.Scope) error { +func Convert_v1_ResourceList_To_core_ResourceList(in *v1.ResourceList, out *core.ResourceList, s conversion.Scope) error { if *in == nil { return nil } if *out == nil { - *out = make(api.ResourceList, len(*in)) + *out = make(core.ResourceList, len(*in)) } for key, val := range *in { // Moved to defaults @@ -520,7 +515,7 @@ func Convert_v1_ResourceList_To_api_ResourceList(in *v1.ResourceList, out *api.R // const milliScale = -3 // val.RoundUp(milliScale) - (*out)[api.ResourceName(key)] = val + (*out)[core.ResourceName(key)] = val } return nil } diff --git 
a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go index 02b1832d..c2aeafc3 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/defaults.go @@ -20,6 +20,8 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/util/parsers" utilpointer "k8s.io/kubernetes/pkg/util/pointer" ) @@ -39,14 +41,6 @@ func SetDefaults_ResourceList(obj *v1.ResourceList) { } } -func SetDefaults_PodExecOptions(obj *v1.PodExecOptions) { - obj.Stdout = true - obj.Stderr = true -} -func SetDefaults_PodAttachOptions(obj *v1.PodAttachOptions) { - obj.Stdout = true - obj.Stderr = true -} func SetDefaults_ReplicationController(obj *v1.ReplicationController) { var labels map[string]string if obj.Spec.Template != nil { @@ -236,17 +230,30 @@ func SetDefaults_PersistentVolume(obj *v1.PersistentVolume) { if obj.Spec.PersistentVolumeReclaimPolicy == "" { obj.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimRetain } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } } func SetDefaults_PersistentVolumeClaim(obj *v1.PersistentVolumeClaim) { if obj.Status.Phase == "" { obj.Status.Phase = v1.ClaimPending } + if obj.Spec.VolumeMode == nil && utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + obj.Spec.VolumeMode = new(v1.PersistentVolumeMode) + *obj.Spec.VolumeMode = v1.PersistentVolumeFilesystem + } } func SetDefaults_ISCSIVolumeSource(obj *v1.ISCSIVolumeSource) { if obj.ISCSIInterface == "" { obj.ISCSIInterface = "default" } } +func SetDefaults_ISCSIPersistentVolumeSource(obj *v1.ISCSIPersistentVolumeSource) { + if obj.ISCSIInterface == "" { + obj.ISCSIInterface = "default" + } +} func SetDefaults_AzureDiskVolumeSource(obj *v1.AzureDiskVolumeSource) { if obj.CachingMode == nil { obj.CachingMode = new(v1.AzureDataDiskCachingMode) diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go similarity index 73% rename from vendor/k8s.io/kubernetes/pkg/api/v1/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go index 0f1e0d49..454e3018 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/doc.go @@ -14,10 +14,10 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/api -// +k8s:conversion-gen-external-types=../../../vendor/k8s.io/api/core/v1 +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/core +// +k8s:conversion-gen-external-types=k8s.io/api/core/v1 // +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=../../../vendor/k8s.io/api/core/v1 +// +k8s:defaulter-gen-input=../../../../vendor/k8s.io/api/core/v1 // Package v1 is the v1 version of the API. 
-package v1 // import "k8s.io/kubernetes/pkg/api/v1" +package v1 // import "k8s.io/kubernetes/pkg/apis/core/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go similarity index 94% rename from vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go index 92cacf8d..4b21aefc 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/helper/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/helper/helpers.go @@ -26,14 +26,13 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/kubernetes/pkg/api/helper" + "k8s.io/kubernetes/pkg/apis/core/helper" ) // IsExtendedResourceName returns true if the resource name is not in the -// default namespace, or it has the opaque integer resource prefix. +// default namespace. func IsExtendedResourceName(name v1.ResourceName) bool { - // TODO: Remove OIR part following deprecation. - return !IsDefaultNamespaceResource(name) || IsOpaqueIntResourceName(name) + return !IsDefaultNamespaceResource(name) } // IsDefaultNamespaceResource returns true if the resource name is in the @@ -69,22 +68,6 @@ func HugePageSizeFromResourceName(name v1.ResourceName) (resource.Quantity, erro return resource.ParseQuantity(pageSize) } -// IsOpaqueIntResourceName returns true if the resource name has the opaque -// integer resource prefix. -func IsOpaqueIntResourceName(name v1.ResourceName) bool { - return strings.HasPrefix(string(name), v1.ResourceOpaqueIntPrefix) -} - -// OpaqueIntResourceName returns a ResourceName with the canonical opaque -// integer prefix prepended. If the argument already has the prefix, it is -// returned unmodified. -func OpaqueIntResourceName(name string) v1.ResourceName { - if IsOpaqueIntResourceName(v1.ResourceName(name)) { - return v1.ResourceName(name) - } - return v1.ResourceName(fmt.Sprintf("%s%s", v1.ResourceOpaqueIntPrefix, name)) -} - var overcommitBlacklist = sets.NewString(string(v1.ResourceNvidiaGPU)) // IsOvercommitAllowed returns true if the resource is in the default diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/register.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go similarity index 100% rename from vendor/k8s.io/kubernetes/pkg/api/v1/register.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/register.go diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go new file mode 100644 index 00000000..29102979 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.conversion.go @@ -0,0 +1,5620 @@ +// +build !ignore_autogenerated + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file was autogenerated by conversion-gen. Do not edit it manually! 
+ +package v1 + +import ( + v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + core "k8s.io/kubernetes/pkg/apis/core" + unsafe "unsafe" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(scheme *runtime.Scheme) error { + return scheme.AddGeneratedConversionFuncs( + Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource, + Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource, + Convert_v1_Affinity_To_core_Affinity, + Convert_core_Affinity_To_v1_Affinity, + Convert_v1_AttachedVolume_To_core_AttachedVolume, + Convert_core_AttachedVolume_To_v1_AttachedVolume, + Convert_v1_AvoidPods_To_core_AvoidPods, + Convert_core_AvoidPods_To_v1_AvoidPods, + Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource, + Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource, + Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource, + Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource, + Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource, + Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource, + Convert_v1_Binding_To_core_Binding, + Convert_core_Binding_To_v1_Binding, + Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource, + Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource, + Convert_v1_Capabilities_To_core_Capabilities, + Convert_core_Capabilities_To_v1_Capabilities, + Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource, + Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource, + Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource, + Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource, + Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource, + Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource, + Convert_v1_ClientIPConfig_To_core_ClientIPConfig, + Convert_core_ClientIPConfig_To_v1_ClientIPConfig, + Convert_v1_ComponentCondition_To_core_ComponentCondition, + Convert_core_ComponentCondition_To_v1_ComponentCondition, + Convert_v1_ComponentStatus_To_core_ComponentStatus, + Convert_core_ComponentStatus_To_v1_ComponentStatus, + Convert_v1_ComponentStatusList_To_core_ComponentStatusList, + Convert_core_ComponentStatusList_To_v1_ComponentStatusList, + Convert_v1_ConfigMap_To_core_ConfigMap, + Convert_core_ConfigMap_To_v1_ConfigMap, + Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource, + Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource, + Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector, + Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector, + Convert_v1_ConfigMapList_To_core_ConfigMapList, + Convert_core_ConfigMapList_To_v1_ConfigMapList, + Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection, + Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection, + Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource, + Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource, + Convert_v1_Container_To_core_Container, + Convert_core_Container_To_v1_Container, + Convert_v1_ContainerImage_To_core_ContainerImage, + 
Convert_core_ContainerImage_To_v1_ContainerImage, + Convert_v1_ContainerPort_To_core_ContainerPort, + Convert_core_ContainerPort_To_v1_ContainerPort, + Convert_v1_ContainerState_To_core_ContainerState, + Convert_core_ContainerState_To_v1_ContainerState, + Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning, + Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning, + Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated, + Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated, + Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting, + Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting, + Convert_v1_ContainerStatus_To_core_ContainerStatus, + Convert_core_ContainerStatus_To_v1_ContainerStatus, + Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint, + Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint, + Convert_v1_DeleteOptions_To_core_DeleteOptions, + Convert_core_DeleteOptions_To_v1_DeleteOptions, + Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection, + Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection, + Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile, + Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile, + Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource, + Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource, + Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource, + Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource, + Convert_v1_EndpointAddress_To_core_EndpointAddress, + Convert_core_EndpointAddress_To_v1_EndpointAddress, + Convert_v1_EndpointPort_To_core_EndpointPort, + Convert_core_EndpointPort_To_v1_EndpointPort, + Convert_v1_EndpointSubset_To_core_EndpointSubset, + Convert_core_EndpointSubset_To_v1_EndpointSubset, + Convert_v1_Endpoints_To_core_Endpoints, + Convert_core_Endpoints_To_v1_Endpoints, + Convert_v1_EndpointsList_To_core_EndpointsList, + Convert_core_EndpointsList_To_v1_EndpointsList, + Convert_v1_EnvFromSource_To_core_EnvFromSource, + Convert_core_EnvFromSource_To_v1_EnvFromSource, + Convert_v1_EnvVar_To_core_EnvVar, + Convert_core_EnvVar_To_v1_EnvVar, + Convert_v1_EnvVarSource_To_core_EnvVarSource, + Convert_core_EnvVarSource_To_v1_EnvVarSource, + Convert_v1_Event_To_core_Event, + Convert_core_Event_To_v1_Event, + Convert_v1_EventList_To_core_EventList, + Convert_core_EventList_To_v1_EventList, + Convert_v1_EventSeries_To_core_EventSeries, + Convert_core_EventSeries_To_v1_EventSeries, + Convert_v1_EventSource_To_core_EventSource, + Convert_core_EventSource_To_v1_EventSource, + Convert_v1_ExecAction_To_core_ExecAction, + Convert_core_ExecAction_To_v1_ExecAction, + Convert_v1_FCVolumeSource_To_core_FCVolumeSource, + Convert_core_FCVolumeSource_To_v1_FCVolumeSource, + Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource, + Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource, + Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource, + Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource, + Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource, + Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource, + Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource, + Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource, + Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource, + Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource, + Convert_v1_HTTPGetAction_To_core_HTTPGetAction, + Convert_core_HTTPGetAction_To_v1_HTTPGetAction, + 
Convert_v1_HTTPHeader_To_core_HTTPHeader, + Convert_core_HTTPHeader_To_v1_HTTPHeader, + Convert_v1_Handler_To_core_Handler, + Convert_core_Handler_To_v1_Handler, + Convert_v1_HostAlias_To_core_HostAlias, + Convert_core_HostAlias_To_v1_HostAlias, + Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource, + Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource, + Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource, + Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource, + Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource, + Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource, + Convert_v1_KeyToPath_To_core_KeyToPath, + Convert_core_KeyToPath_To_v1_KeyToPath, + Convert_v1_Lifecycle_To_core_Lifecycle, + Convert_core_Lifecycle_To_v1_Lifecycle, + Convert_v1_LimitRange_To_core_LimitRange, + Convert_core_LimitRange_To_v1_LimitRange, + Convert_v1_LimitRangeItem_To_core_LimitRangeItem, + Convert_core_LimitRangeItem_To_v1_LimitRangeItem, + Convert_v1_LimitRangeList_To_core_LimitRangeList, + Convert_core_LimitRangeList_To_v1_LimitRangeList, + Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec, + Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec, + Convert_v1_List_To_core_List, + Convert_core_List_To_v1_List, + Convert_v1_ListOptions_To_core_ListOptions, + Convert_core_ListOptions_To_v1_ListOptions, + Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress, + Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress, + Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus, + Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus, + Convert_v1_LocalObjectReference_To_core_LocalObjectReference, + Convert_core_LocalObjectReference_To_v1_LocalObjectReference, + Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource, + Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource, + Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource, + Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource, + Convert_v1_Namespace_To_core_Namespace, + Convert_core_Namespace_To_v1_Namespace, + Convert_v1_NamespaceList_To_core_NamespaceList, + Convert_core_NamespaceList_To_v1_NamespaceList, + Convert_v1_NamespaceSpec_To_core_NamespaceSpec, + Convert_core_NamespaceSpec_To_v1_NamespaceSpec, + Convert_v1_NamespaceStatus_To_core_NamespaceStatus, + Convert_core_NamespaceStatus_To_v1_NamespaceStatus, + Convert_v1_Node_To_core_Node, + Convert_core_Node_To_v1_Node, + Convert_v1_NodeAddress_To_core_NodeAddress, + Convert_core_NodeAddress_To_v1_NodeAddress, + Convert_v1_NodeAffinity_To_core_NodeAffinity, + Convert_core_NodeAffinity_To_v1_NodeAffinity, + Convert_v1_NodeCondition_To_core_NodeCondition, + Convert_core_NodeCondition_To_v1_NodeCondition, + Convert_v1_NodeConfigSource_To_core_NodeConfigSource, + Convert_core_NodeConfigSource_To_v1_NodeConfigSource, + Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints, + Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints, + Convert_v1_NodeList_To_core_NodeList, + Convert_core_NodeList_To_v1_NodeList, + Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions, + Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions, + Convert_v1_NodeResources_To_core_NodeResources, + Convert_core_NodeResources_To_v1_NodeResources, + Convert_v1_NodeSelector_To_core_NodeSelector, + Convert_core_NodeSelector_To_v1_NodeSelector, + Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement, + Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement, + Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm, + 
Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm, + Convert_v1_NodeSpec_To_core_NodeSpec, + Convert_core_NodeSpec_To_v1_NodeSpec, + Convert_v1_NodeStatus_To_core_NodeStatus, + Convert_core_NodeStatus_To_v1_NodeStatus, + Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo, + Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo, + Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector, + Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector, + Convert_v1_ObjectMeta_To_core_ObjectMeta, + Convert_core_ObjectMeta_To_v1_ObjectMeta, + Convert_v1_ObjectReference_To_core_ObjectReference, + Convert_core_ObjectReference_To_v1_ObjectReference, + Convert_v1_PersistentVolume_To_core_PersistentVolume, + Convert_core_PersistentVolume_To_v1_PersistentVolume, + Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim, + Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim, + Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition, + Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition, + Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList, + Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList, + Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec, + Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec, + Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus, + Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus, + Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource, + Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource, + Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList, + Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList, + Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource, + Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource, + Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec, + Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec, + Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus, + Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus, + Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource, + Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource, + Convert_v1_Pod_To_core_Pod, + Convert_core_Pod_To_v1_Pod, + Convert_v1_PodAffinity_To_core_PodAffinity, + Convert_core_PodAffinity_To_v1_PodAffinity, + Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm, + Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm, + Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity, + Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity, + Convert_v1_PodAttachOptions_To_core_PodAttachOptions, + Convert_core_PodAttachOptions_To_v1_PodAttachOptions, + Convert_v1_PodCondition_To_core_PodCondition, + Convert_core_PodCondition_To_v1_PodCondition, + Convert_v1_PodDNSConfig_To_core_PodDNSConfig, + Convert_core_PodDNSConfig_To_v1_PodDNSConfig, + Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption, + Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption, + Convert_v1_PodExecOptions_To_core_PodExecOptions, + Convert_core_PodExecOptions_To_v1_PodExecOptions, + Convert_v1_PodList_To_core_PodList, + Convert_core_PodList_To_v1_PodList, + Convert_v1_PodLogOptions_To_core_PodLogOptions, + Convert_core_PodLogOptions_To_v1_PodLogOptions, + Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions, + 
Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions, + Convert_v1_PodProxyOptions_To_core_PodProxyOptions, + Convert_core_PodProxyOptions_To_v1_PodProxyOptions, + Convert_v1_PodSecurityContext_To_core_PodSecurityContext, + Convert_core_PodSecurityContext_To_v1_PodSecurityContext, + Convert_v1_PodSignature_To_core_PodSignature, + Convert_core_PodSignature_To_v1_PodSignature, + Convert_v1_PodSpec_To_core_PodSpec, + Convert_core_PodSpec_To_v1_PodSpec, + Convert_v1_PodStatus_To_core_PodStatus, + Convert_core_PodStatus_To_v1_PodStatus, + Convert_v1_PodStatusResult_To_core_PodStatusResult, + Convert_core_PodStatusResult_To_v1_PodStatusResult, + Convert_v1_PodTemplate_To_core_PodTemplate, + Convert_core_PodTemplate_To_v1_PodTemplate, + Convert_v1_PodTemplateList_To_core_PodTemplateList, + Convert_core_PodTemplateList_To_v1_PodTemplateList, + Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec, + Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec, + Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource, + Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource, + Convert_v1_Preconditions_To_core_Preconditions, + Convert_core_Preconditions_To_v1_Preconditions, + Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry, + Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry, + Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm, + Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm, + Convert_v1_Probe_To_core_Probe, + Convert_core_Probe_To_v1_Probe, + Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource, + Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource, + Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource, + Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource, + Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource, + Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource, + Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource, + Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource, + Convert_v1_RangeAllocation_To_core_RangeAllocation, + Convert_core_RangeAllocation_To_v1_RangeAllocation, + Convert_v1_ReplicationController_To_core_ReplicationController, + Convert_core_ReplicationController_To_v1_ReplicationController, + Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition, + Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition, + Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList, + Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList, + Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec, + Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec, + Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus, + Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus, + Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector, + Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector, + Convert_v1_ResourceQuota_To_core_ResourceQuota, + Convert_core_ResourceQuota_To_v1_ResourceQuota, + Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList, + Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList, + Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec, + Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec, + Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus, + Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus, + Convert_v1_ResourceRequirements_To_core_ResourceRequirements, + 
Convert_core_ResourceRequirements_To_v1_ResourceRequirements, + Convert_v1_SELinuxOptions_To_core_SELinuxOptions, + Convert_core_SELinuxOptions_To_v1_SELinuxOptions, + Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource, + Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource, + Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource, + Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource, + Convert_v1_Secret_To_core_Secret, + Convert_core_Secret_To_v1_Secret, + Convert_v1_SecretEnvSource_To_core_SecretEnvSource, + Convert_core_SecretEnvSource_To_v1_SecretEnvSource, + Convert_v1_SecretKeySelector_To_core_SecretKeySelector, + Convert_core_SecretKeySelector_To_v1_SecretKeySelector, + Convert_v1_SecretList_To_core_SecretList, + Convert_core_SecretList_To_v1_SecretList, + Convert_v1_SecretProjection_To_core_SecretProjection, + Convert_core_SecretProjection_To_v1_SecretProjection, + Convert_v1_SecretReference_To_core_SecretReference, + Convert_core_SecretReference_To_v1_SecretReference, + Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource, + Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource, + Convert_v1_SecurityContext_To_core_SecurityContext, + Convert_core_SecurityContext_To_v1_SecurityContext, + Convert_v1_SerializedReference_To_core_SerializedReference, + Convert_core_SerializedReference_To_v1_SerializedReference, + Convert_v1_Service_To_core_Service, + Convert_core_Service_To_v1_Service, + Convert_v1_ServiceAccount_To_core_ServiceAccount, + Convert_core_ServiceAccount_To_v1_ServiceAccount, + Convert_v1_ServiceAccountList_To_core_ServiceAccountList, + Convert_core_ServiceAccountList_To_v1_ServiceAccountList, + Convert_v1_ServiceList_To_core_ServiceList, + Convert_core_ServiceList_To_v1_ServiceList, + Convert_v1_ServicePort_To_core_ServicePort, + Convert_core_ServicePort_To_v1_ServicePort, + Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions, + Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions, + Convert_v1_ServiceSpec_To_core_ServiceSpec, + Convert_core_ServiceSpec_To_v1_ServiceSpec, + Convert_v1_ServiceStatus_To_core_ServiceStatus, + Convert_core_ServiceStatus_To_v1_ServiceStatus, + Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig, + Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig, + Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource, + Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource, + Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource, + Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource, + Convert_v1_Sysctl_To_core_Sysctl, + Convert_core_Sysctl_To_v1_Sysctl, + Convert_v1_TCPSocketAction_To_core_TCPSocketAction, + Convert_core_TCPSocketAction_To_v1_TCPSocketAction, + Convert_v1_Taint_To_core_Taint, + Convert_core_Taint_To_v1_Taint, + Convert_v1_Toleration_To_core_Toleration, + Convert_core_Toleration_To_v1_Toleration, + Convert_v1_Volume_To_core_Volume, + Convert_core_Volume_To_v1_Volume, + Convert_v1_VolumeDevice_To_core_VolumeDevice, + Convert_core_VolumeDevice_To_v1_VolumeDevice, + Convert_v1_VolumeMount_To_core_VolumeMount, + Convert_core_VolumeMount_To_v1_VolumeMount, + Convert_v1_VolumeProjection_To_core_VolumeProjection, + Convert_core_VolumeProjection_To_v1_VolumeProjection, + Convert_v1_VolumeSource_To_core_VolumeSource, + Convert_core_VolumeSource_To_v1_VolumeSource, + Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource, + 
Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource, + Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm, + Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm, + ) +} + +func autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in *v1.AWSElasticBlockStoreVolumeSource, out *core.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AWSElasticBlockStoreVolumeSource_To_core_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource is an autogenerated conversion function. +func Convert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in *core.AWSElasticBlockStoreVolumeSource, out *v1.AWSElasticBlockStoreVolumeSource, s conversion.Scope) error { + return autoConvert_core_AWSElasticBlockStoreVolumeSource_To_v1_AWSElasticBlockStoreVolumeSource(in, out, s) +} + +func autoConvert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*core.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*core.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*core.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_v1_Affinity_To_core_Affinity is an autogenerated conversion function. +func Convert_v1_Affinity_To_core_Affinity(in *v1.Affinity, out *core.Affinity, s conversion.Scope) error { + return autoConvert_v1_Affinity_To_core_Affinity(in, out, s) +} + +func autoConvert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error { + out.NodeAffinity = (*v1.NodeAffinity)(unsafe.Pointer(in.NodeAffinity)) + out.PodAffinity = (*v1.PodAffinity)(unsafe.Pointer(in.PodAffinity)) + out.PodAntiAffinity = (*v1.PodAntiAffinity)(unsafe.Pointer(in.PodAntiAffinity)) + return nil +} + +// Convert_core_Affinity_To_v1_Affinity is an autogenerated conversion function. +func Convert_core_Affinity_To_v1_Affinity(in *core.Affinity, out *v1.Affinity, s conversion.Scope) error { + return autoConvert_core_Affinity_To_v1_Affinity(in, out, s) +} + +func autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error { + out.Name = core.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_AttachedVolume_To_core_AttachedVolume is an autogenerated conversion function. 
+func Convert_v1_AttachedVolume_To_core_AttachedVolume(in *v1.AttachedVolume, out *core.AttachedVolume, s conversion.Scope) error { + return autoConvert_v1_AttachedVolume_To_core_AttachedVolume(in, out, s) +} + +func autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { + out.Name = v1.UniqueVolumeName(in.Name) + out.DevicePath = in.DevicePath + return nil +} + +// Convert_core_AttachedVolume_To_v1_AttachedVolume is an autogenerated conversion function. +func Convert_core_AttachedVolume_To_v1_AttachedVolume(in *core.AttachedVolume, out *v1.AttachedVolume, s conversion.Scope) error { + return autoConvert_core_AttachedVolume_To_v1_AttachedVolume(in, out, s) +} + +func autoConvert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]core.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +// Convert_v1_AvoidPods_To_core_AvoidPods is an autogenerated conversion function. +func Convert_v1_AvoidPods_To_core_AvoidPods(in *v1.AvoidPods, out *core.AvoidPods, s conversion.Scope) error { + return autoConvert_v1_AvoidPods_To_core_AvoidPods(in, out, s) +} + +func autoConvert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { + out.PreferAvoidPods = *(*[]v1.PreferAvoidPodsEntry)(unsafe.Pointer(&in.PreferAvoidPods)) + return nil +} + +// Convert_core_AvoidPods_To_v1_AvoidPods is an autogenerated conversion function. +func Convert_core_AvoidPods_To_v1_AvoidPods(in *core.AvoidPods, out *v1.AvoidPods, s conversion.Scope) error { + return autoConvert_core_AvoidPods_To_v1_AvoidPods(in, out, s) +} + +func autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*core.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + out.Kind = (*core.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) + return nil +} + +// Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in *v1.AzureDiskVolumeSource, out *core.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureDiskVolumeSource_To_core_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { + out.DiskName = in.DiskName + out.DataDiskURI = in.DataDiskURI + out.CachingMode = (*v1.AzureDataDiskCachingMode)(unsafe.Pointer(in.CachingMode)) + out.FSType = (*string)(unsafe.Pointer(in.FSType)) + out.ReadOnly = (*bool)(unsafe.Pointer(in.ReadOnly)) + out.Kind = (*v1.AzureDataDiskKind)(unsafe.Pointer(in.Kind)) + return nil +} + +// Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource is an autogenerated conversion function. 
+func Convert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in *core.AzureDiskVolumeSource, out *v1.AzureDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureDiskVolumeSource_To_v1_AzureDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) + return nil +} + +// Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in *v1.AzureFilePersistentVolumeSource, out *core.AzureFilePersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFilePersistentVolumeSource_To_core_AzureFilePersistentVolumeSource(in, out, s) +} + +func autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + out.SecretNamespace = (*string)(unsafe.Pointer(in.SecretNamespace)) + return nil +} + +// Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource is an autogenerated conversion function. +func Convert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in *core.AzureFilePersistentVolumeSource, out *v1.AzureFilePersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureFilePersistentVolumeSource_To_v1_AzureFilePersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in *v1.AzureFileVolumeSource, out *core.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_v1_AzureFileVolumeSource_To_core_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.ShareName = in.ShareName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource is an autogenerated conversion function. +func Convert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in *core.AzureFileVolumeSource, out *v1.AzureFileVolumeSource, s conversion.Scope) error { + return autoConvert_core_AzureFileVolumeSource_To_v1_AzureFileVolumeSource(in, out, s) +} + +func autoConvert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Binding_To_core_Binding is an autogenerated conversion function. 
+func Convert_v1_Binding_To_core_Binding(in *v1.Binding, out *core.Binding, s conversion.Scope) error { + return autoConvert_v1_Binding_To_core_Binding(in, out, s) +} + +func autoConvert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Target, &out.Target, s); err != nil { + return err + } + return nil +} + +// Convert_core_Binding_To_v1_Binding is an autogenerated conversion function. +func Convert_core_Binding_To_v1_Binding(in *core.Binding, out *v1.Binding, s conversion.Scope) error { + return autoConvert_core_Binding_To_v1_Binding(in, out, s) +} + +func autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.VolumeHandle = in.VolumeHandle + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in *v1.CSIPersistentVolumeSource, out *core.CSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CSIPersistentVolumeSource_To_core_CSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.VolumeHandle = in.VolumeHandle + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in *core.CSIPersistentVolumeSource, out *v1.CSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_CSIPersistentVolumeSource_To_v1_CSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { + out.Add = *(*[]core.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]core.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_v1_Capabilities_To_core_Capabilities is an autogenerated conversion function. +func Convert_v1_Capabilities_To_core_Capabilities(in *v1.Capabilities, out *core.Capabilities, s conversion.Scope) error { + return autoConvert_v1_Capabilities_To_core_Capabilities(in, out, s) +} + +func autoConvert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { + out.Add = *(*[]v1.Capability)(unsafe.Pointer(&in.Add)) + out.Drop = *(*[]v1.Capability)(unsafe.Pointer(&in.Drop)) + return nil +} + +// Convert_core_Capabilities_To_v1_Capabilities is an autogenerated conversion function. 
+func Convert_core_Capabilities_To_v1_Capabilities(in *core.Capabilities, out *v1.Capabilities, s conversion.Scope) error { + return autoConvert_core_Capabilities_To_v1_Capabilities(in, out, s) +} + +func autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in *v1.CephFSPersistentVolumeSource, out *core.CephFSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSPersistentVolumeSource_To_core_CephFSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in *core.CephFSPersistentVolumeSource, out *v1.CephFSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_CephFSPersistentVolumeSource_To_v1_CephFSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource is an autogenerated conversion function. +func Convert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in *v1.CephFSVolumeSource, out *core.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CephFSVolumeSource_To_core_CephFSVolumeSource(in, out, s) +} + +func autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + out.Monitors = *(*[]string)(unsafe.Pointer(&in.Monitors)) + out.Path = in.Path + out.User = in.User + out.SecretFile = in.SecretFile + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource is an autogenerated conversion function. 
+func Convert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in *core.CephFSVolumeSource, out *v1.CephFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_CephFSVolumeSource_To_v1_CephFSVolumeSource(in, out, s) +} + +func autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource is an autogenerated conversion function. +func Convert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in *v1.CinderVolumeSource, out *core.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_v1_CinderVolumeSource_To_core_CinderVolumeSource(in, out, s) +} + +func autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource is an autogenerated conversion function. +func Convert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in *core.CinderVolumeSource, out *v1.CinderVolumeSource, s conversion.Scope) error { + return autoConvert_core_CinderVolumeSource_To_v1_CinderVolumeSource(in, out, s) +} + +func autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ClientIPConfig_To_core_ClientIPConfig is an autogenerated conversion function. +func Convert_v1_ClientIPConfig_To_core_ClientIPConfig(in *v1.ClientIPConfig, out *core.ClientIPConfig, s conversion.Scope) error { + return autoConvert_v1_ClientIPConfig_To_core_ClientIPConfig(in, out, s) +} + +func autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { + out.TimeoutSeconds = (*int32)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_core_ClientIPConfig_To_v1_ClientIPConfig is an autogenerated conversion function. +func Convert_core_ClientIPConfig_To_v1_ClientIPConfig(in *core.ClientIPConfig, out *v1.ClientIPConfig, s conversion.Scope) error { + return autoConvert_core_ClientIPConfig_To_v1_ClientIPConfig(in, out, s) +} + +func autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { + out.Type = core.ComponentConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_v1_ComponentCondition_To_core_ComponentCondition is an autogenerated conversion function. +func Convert_v1_ComponentCondition_To_core_ComponentCondition(in *v1.ComponentCondition, out *core.ComponentCondition, s conversion.Scope) error { + return autoConvert_v1_ComponentCondition_To_core_ComponentCondition(in, out, s) +} + +func autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { + out.Type = v1.ComponentConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.Message = in.Message + out.Error = in.Error + return nil +} + +// Convert_core_ComponentCondition_To_v1_ComponentCondition is an autogenerated conversion function. 
+func Convert_core_ComponentCondition_To_v1_ComponentCondition(in *core.ComponentCondition, out *v1.ComponentCondition, s conversion.Scope) error { + return autoConvert_core_ComponentCondition_To_v1_ComponentCondition(in, out, s) +} + +func autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]core.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ComponentStatus_To_core_ComponentStatus is an autogenerated conversion function. +func Convert_v1_ComponentStatus_To_core_ComponentStatus(in *v1.ComponentStatus, out *core.ComponentStatus, s conversion.Scope) error { + return autoConvert_v1_ComponentStatus_To_core_ComponentStatus(in, out, s) +} + +func autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Conditions = *(*[]v1.ComponentCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ComponentStatus_To_v1_ComponentStatus is an autogenerated conversion function. +func Convert_core_ComponentStatus_To_v1_ComponentStatus(in *core.ComponentStatus, out *v1.ComponentStatus, s conversion.Scope) error { + return autoConvert_core_ComponentStatus_To_v1_ComponentStatus(in, out, s) +} + +func autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ComponentStatusList_To_core_ComponentStatusList is an autogenerated conversion function. +func Convert_v1_ComponentStatusList_To_core_ComponentStatusList(in *v1.ComponentStatusList, out *core.ComponentStatusList, s conversion.Scope) error { + return autoConvert_v1_ComponentStatusList_To_core_ComponentStatusList(in, out, s) +} + +func autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ComponentStatus)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ComponentStatusList_To_v1_ComponentStatusList is an autogenerated conversion function. +func Convert_core_ComponentStatusList_To_v1_ComponentStatusList(in *core.ComponentStatusList, out *v1.ComponentStatusList, s conversion.Scope) error { + return autoConvert_core_ComponentStatusList_To_v1_ComponentStatusList(in, out, s) +} + +func autoConvert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_ConfigMap_To_core_ConfigMap is an autogenerated conversion function. +func Convert_v1_ConfigMap_To_core_ConfigMap(in *v1.ConfigMap, out *core.ConfigMap, s conversion.Scope) error { + return autoConvert_v1_ConfigMap_To_core_ConfigMap(in, out, s) +} + +func autoConvert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string]string)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_ConfigMap_To_v1_ConfigMap is an autogenerated conversion function. 
+func Convert_core_ConfigMap_To_v1_ConfigMap(in *core.ConfigMap, out *v1.ConfigMap, s conversion.Scope) error { + return autoConvert_core_ConfigMap_To_v1_ConfigMap(in, out, s) +} + +func autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource is an autogenerated conversion function. +func Convert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in *v1.ConfigMapEnvSource, out *core.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapEnvSource_To_core_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource is an autogenerated conversion function. +func Convert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in *core.ConfigMapEnvSource, out *v1.ConfigMapEnvSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapEnvSource_To_v1_ConfigMapEnvSource(in, out, s) +} + +func autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in *v1.ConfigMapKeySelector, out *core.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_v1_ConfigMapKeySelector_To_core_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector is an autogenerated conversion function. +func Convert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in *core.ConfigMapKeySelector, out *v1.ConfigMapKeySelector, s conversion.Scope) error { + return autoConvert_core_ConfigMapKeySelector_To_v1_ConfigMapKeySelector(in, out, s) +} + +func autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ConfigMapList_To_core_ConfigMapList is an autogenerated conversion function. 
+func Convert_v1_ConfigMapList_To_core_ConfigMapList(in *v1.ConfigMapList, out *core.ConfigMapList, s conversion.Scope) error { + return autoConvert_v1_ConfigMapList_To_core_ConfigMapList(in, out, s) +} + +func autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ConfigMap)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ConfigMapList_To_v1_ConfigMapList is an autogenerated conversion function. +func Convert_core_ConfigMapList_To_v1_ConfigMapList(in *core.ConfigMapList, out *v1.ConfigMapList, s conversion.Scope) error { + return autoConvert_core_ConfigMapList_To_v1_ConfigMapList(in, out, s) +} + +func autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection is an autogenerated conversion function. +func Convert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in *v1.ConfigMapProjection, out *core.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_v1_ConfigMapProjection_To_core_ConfigMapProjection(in, out, s) +} + +func autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection is an autogenerated conversion function. +func Convert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in *core.ConfigMapProjection, out *v1.ConfigMapProjection, s conversion.Scope) error { + return autoConvert_core_ConfigMapProjection_To_v1_ConfigMapProjection(in, out, s) +} + +func autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in *v1.ConfigMapVolumeSource, out *core.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ConfigMapVolumeSource_To_core_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource is an autogenerated conversion function. +func Convert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in *core.ConfigMapVolumeSource, out *v1.ConfigMapVolumeSource, s conversion.Scope) error { + return autoConvert_core_ConfigMapVolumeSource_To_v1_ConfigMapVolumeSource(in, out, s) +} + +func autoConvert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]core.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]core.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]core.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]core.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]core.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*core.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*core.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*core.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = core.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = core.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.SecurityContext) + if err := Convert_v1_SecurityContext_To_core_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_v1_Container_To_core_Container is an autogenerated conversion function. 
+func Convert_v1_Container_To_core_Container(in *v1.Container, out *core.Container, s conversion.Scope) error { + return autoConvert_v1_Container_To_core_Container(in, out, s) +} + +func autoConvert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + out.Name = in.Name + out.Image = in.Image + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + out.Args = *(*[]string)(unsafe.Pointer(&in.Args)) + out.WorkingDir = in.WorkingDir + out.Ports = *(*[]v1.ContainerPort)(unsafe.Pointer(&in.Ports)) + out.EnvFrom = *(*[]v1.EnvFromSource)(unsafe.Pointer(&in.EnvFrom)) + out.Env = *(*[]v1.EnvVar)(unsafe.Pointer(&in.Env)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeMounts = *(*[]v1.VolumeMount)(unsafe.Pointer(&in.VolumeMounts)) + out.VolumeDevices = *(*[]v1.VolumeDevice)(unsafe.Pointer(&in.VolumeDevices)) + out.LivenessProbe = (*v1.Probe)(unsafe.Pointer(in.LivenessProbe)) + out.ReadinessProbe = (*v1.Probe)(unsafe.Pointer(in.ReadinessProbe)) + out.Lifecycle = (*v1.Lifecycle)(unsafe.Pointer(in.Lifecycle)) + out.TerminationMessagePath = in.TerminationMessagePath + out.TerminationMessagePolicy = v1.TerminationMessagePolicy(in.TerminationMessagePolicy) + out.ImagePullPolicy = v1.PullPolicy(in.ImagePullPolicy) + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.SecurityContext) + if err := Convert_core_SecurityContext_To_v1_SecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.Stdin = in.Stdin + out.StdinOnce = in.StdinOnce + out.TTY = in.TTY + return nil +} + +// Convert_core_Container_To_v1_Container is an autogenerated conversion function. +func Convert_core_Container_To_v1_Container(in *core.Container, out *v1.Container, s conversion.Scope) error { + return autoConvert_core_Container_To_v1_Container(in, out, s) +} + +func autoConvert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_v1_ContainerImage_To_core_ContainerImage is an autogenerated conversion function. +func Convert_v1_ContainerImage_To_core_ContainerImage(in *v1.ContainerImage, out *core.ContainerImage, s conversion.Scope) error { + return autoConvert_v1_ContainerImage_To_core_ContainerImage(in, out, s) +} + +func autoConvert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + out.Names = *(*[]string)(unsafe.Pointer(&in.Names)) + out.SizeBytes = in.SizeBytes + return nil +} + +// Convert_core_ContainerImage_To_v1_ContainerImage is an autogenerated conversion function. +func Convert_core_ContainerImage_To_v1_ContainerImage(in *core.ContainerImage, out *v1.ContainerImage, s conversion.Scope) error { + return autoConvert_core_ContainerImage_To_v1_ContainerImage(in, out, s) +} + +func autoConvert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = core.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_v1_ContainerPort_To_core_ContainerPort is an autogenerated conversion function. 
+func Convert_v1_ContainerPort_To_core_ContainerPort(in *v1.ContainerPort, out *core.ContainerPort, s conversion.Scope) error { + return autoConvert_v1_ContainerPort_To_core_ContainerPort(in, out, s) +} + +func autoConvert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + out.Name = in.Name + out.HostPort = in.HostPort + out.ContainerPort = in.ContainerPort + out.Protocol = v1.Protocol(in.Protocol) + out.HostIP = in.HostIP + return nil +} + +// Convert_core_ContainerPort_To_v1_ContainerPort is an autogenerated conversion function. +func Convert_core_ContainerPort_To_v1_ContainerPort(in *core.ContainerPort, out *v1.ContainerPort, s conversion.Scope) error { + return autoConvert_core_ContainerPort_To_v1_ContainerPort(in, out, s) +} + +func autoConvert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + out.Waiting = (*core.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*core.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*core.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_v1_ContainerState_To_core_ContainerState is an autogenerated conversion function. +func Convert_v1_ContainerState_To_core_ContainerState(in *v1.ContainerState, out *core.ContainerState, s conversion.Scope) error { + return autoConvert_v1_ContainerState_To_core_ContainerState(in, out, s) +} + +func autoConvert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + out.Waiting = (*v1.ContainerStateWaiting)(unsafe.Pointer(in.Waiting)) + out.Running = (*v1.ContainerStateRunning)(unsafe.Pointer(in.Running)) + out.Terminated = (*v1.ContainerStateTerminated)(unsafe.Pointer(in.Terminated)) + return nil +} + +// Convert_core_ContainerState_To_v1_ContainerState is an autogenerated conversion function. +func Convert_core_ContainerState_To_v1_ContainerState(in *core.ContainerState, out *v1.ContainerState, s conversion.Scope) error { + return autoConvert_core_ContainerState_To_v1_ContainerState(in, out, s) +} + +func autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning is an autogenerated conversion function. +func Convert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in *v1.ContainerStateRunning, out *core.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_v1_ContainerStateRunning_To_core_ContainerStateRunning(in, out, s) +} + +func autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + out.StartedAt = in.StartedAt + return nil +} + +// Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning is an autogenerated conversion function. 
+func Convert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in *core.ContainerStateRunning, out *v1.ContainerStateRunning, s conversion.Scope) error { + return autoConvert_core_ContainerStateRunning_To_v1_ContainerStateRunning(in, out, s) +} + +func autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated is an autogenerated conversion function. +func Convert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in *v1.ContainerStateTerminated, out *core.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_v1_ContainerStateTerminated_To_core_ContainerStateTerminated(in, out, s) +} + +func autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + out.ExitCode = in.ExitCode + out.Signal = in.Signal + out.Reason = in.Reason + out.Message = in.Message + out.StartedAt = in.StartedAt + out.FinishedAt = in.FinishedAt + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated is an autogenerated conversion function. +func Convert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in *core.ContainerStateTerminated, out *v1.ContainerStateTerminated, s conversion.Scope) error { + return autoConvert_core_ContainerStateTerminated_To_v1_ContainerStateTerminated(in, out, s) +} + +func autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting is an autogenerated conversion function. +func Convert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in *v1.ContainerStateWaiting, out *core.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_v1_ContainerStateWaiting_To_core_ContainerStateWaiting(in, out, s) +} + +func autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting is an autogenerated conversion function. 
+func Convert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in *core.ContainerStateWaiting, out *v1.ContainerStateWaiting, s conversion.Scope) error { + return autoConvert_core_ContainerStateWaiting_To_v1_ContainerStateWaiting(in, out, s) +} + +func autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_v1_ContainerState_To_core_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_v1_ContainerStatus_To_core_ContainerStatus is an autogenerated conversion function. +func Convert_v1_ContainerStatus_To_core_ContainerStatus(in *v1.ContainerStatus, out *core.ContainerStatus, s conversion.Scope) error { + return autoConvert_v1_ContainerStatus_To_core_ContainerStatus(in, out, s) +} + +func autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.State, &out.State, s); err != nil { + return err + } + if err := Convert_core_ContainerState_To_v1_ContainerState(&in.LastTerminationState, &out.LastTerminationState, s); err != nil { + return err + } + out.Ready = in.Ready + out.RestartCount = in.RestartCount + out.Image = in.Image + out.ImageID = in.ImageID + out.ContainerID = in.ContainerID + return nil +} + +// Convert_core_ContainerStatus_To_v1_ContainerStatus is an autogenerated conversion function. +func Convert_core_ContainerStatus_To_v1_ContainerStatus(in *core.ContainerStatus, out *v1.ContainerStatus, s conversion.Scope) error { + return autoConvert_core_ContainerStatus_To_v1_ContainerStatus(in, out, s) +} + +func autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint is an autogenerated conversion function. +func Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in *v1.DaemonEndpoint, out *core.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_v1_DaemonEndpoint_To_core_DaemonEndpoint(in, out, s) +} + +func autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + out.Port = in.Port + return nil +} + +// Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint is an autogenerated conversion function. 
+func Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in *core.DaemonEndpoint, out *v1.DaemonEndpoint, s conversion.Scope) error { + return autoConvert_core_DaemonEndpoint_To_v1_DaemonEndpoint(in, out, s) +} + +func autoConvert_v1_DeleteOptions_To_core_DeleteOptions(in *v1.DeleteOptions, out *core.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*core.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*core.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_v1_DeleteOptions_To_core_DeleteOptions is an autogenerated conversion function. +func Convert_v1_DeleteOptions_To_core_DeleteOptions(in *v1.DeleteOptions, out *core.DeleteOptions, s conversion.Scope) error { + return autoConvert_v1_DeleteOptions_To_core_DeleteOptions(in, out, s) +} + +func autoConvert_core_DeleteOptions_To_v1_DeleteOptions(in *core.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { + out.GracePeriodSeconds = (*int64)(unsafe.Pointer(in.GracePeriodSeconds)) + out.Preconditions = (*v1.Preconditions)(unsafe.Pointer(in.Preconditions)) + out.OrphanDependents = (*bool)(unsafe.Pointer(in.OrphanDependents)) + out.PropagationPolicy = (*v1.DeletionPropagation)(unsafe.Pointer(in.PropagationPolicy)) + return nil +} + +// Convert_core_DeleteOptions_To_v1_DeleteOptions is an autogenerated conversion function. +func Convert_core_DeleteOptions_To_v1_DeleteOptions(in *core.DeleteOptions, out *v1.DeleteOptions, s conversion.Scope) error { + return autoConvert_core_DeleteOptions_To_v1_DeleteOptions(in, out, s) +} + +func autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection is an autogenerated conversion function. +func Convert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in *v1.DownwardAPIProjection, out *core.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIProjection_To_core_DownwardAPIProjection(in, out, s) +} + +func autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection is an autogenerated conversion function. +func Convert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in *core.DownwardAPIProjection, out *v1.DownwardAPIProjection, s conversion.Scope) error { + return autoConvert_core_DownwardAPIProjection_To_v1_DownwardAPIProjection(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile is an autogenerated conversion function. 
+func Convert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in *v1.DownwardAPIVolumeFile, out *core.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeFile_To_core_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + out.Path = in.Path + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in *core.DownwardAPIVolumeFile, out *v1.DownwardAPIVolumeFile, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeFile_To_v1_DownwardAPIVolumeFile(in, out, s) +} + +func autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]core.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in *v1.DownwardAPIVolumeSource, out *core.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_DownwardAPIVolumeSource_To_core_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + out.Items = *(*[]v1.DownwardAPIVolumeFile)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource is an autogenerated conversion function. +func Convert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in *core.DownwardAPIVolumeSource, out *v1.DownwardAPIVolumeSource, s conversion.Scope) error { + return autoConvert_core_DownwardAPIVolumeSource_To_v1_DownwardAPIVolumeSource(in, out, s) +} + +func autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = core.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource is an autogenerated conversion function. +func Convert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in *v1.EmptyDirVolumeSource, out *core.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_v1_EmptyDirVolumeSource_To_core_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + out.Medium = v1.StorageMedium(in.Medium) + out.SizeLimit = (*resource.Quantity)(unsafe.Pointer(in.SizeLimit)) + return nil +} + +// Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource is an autogenerated conversion function. 
+func Convert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in *core.EmptyDirVolumeSource, out *v1.EmptyDirVolumeSource, s conversion.Scope) error { + return autoConvert_core_EmptyDirVolumeSource_To_v1_EmptyDirVolumeSource(in, out, s) +} + +func autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*core.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_v1_EndpointAddress_To_core_EndpointAddress is an autogenerated conversion function. +func Convert_v1_EndpointAddress_To_core_EndpointAddress(in *v1.EndpointAddress, out *core.EndpointAddress, s conversion.Scope) error { + return autoConvert_v1_EndpointAddress_To_core_EndpointAddress(in, out, s) +} + +func autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + out.NodeName = (*string)(unsafe.Pointer(in.NodeName)) + out.TargetRef = (*v1.ObjectReference)(unsafe.Pointer(in.TargetRef)) + return nil +} + +// Convert_core_EndpointAddress_To_v1_EndpointAddress is an autogenerated conversion function. +func Convert_core_EndpointAddress_To_v1_EndpointAddress(in *core.EndpointAddress, out *v1.EndpointAddress, s conversion.Scope) error { + return autoConvert_core_EndpointAddress_To_v1_EndpointAddress(in, out, s) +} + +func autoConvert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = core.Protocol(in.Protocol) + return nil +} + +// Convert_v1_EndpointPort_To_core_EndpointPort is an autogenerated conversion function. +func Convert_v1_EndpointPort_To_core_EndpointPort(in *v1.EndpointPort, out *core.EndpointPort, s conversion.Scope) error { + return autoConvert_v1_EndpointPort_To_core_EndpointPort(in, out, s) +} + +func autoConvert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + out.Name = in.Name + out.Port = in.Port + out.Protocol = v1.Protocol(in.Protocol) + return nil +} + +// Convert_core_EndpointPort_To_v1_EndpointPort is an autogenerated conversion function. +func Convert_core_EndpointPort_To_v1_EndpointPort(in *core.EndpointPort, out *v1.EndpointPort, s conversion.Scope) error { + return autoConvert_core_EndpointPort_To_v1_EndpointPort(in, out, s) +} + +func autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]core.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]core.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_EndpointSubset_To_core_EndpointSubset is an autogenerated conversion function. 
+func Convert_v1_EndpointSubset_To_core_EndpointSubset(in *v1.EndpointSubset, out *core.EndpointSubset, s conversion.Scope) error { + return autoConvert_v1_EndpointSubset_To_core_EndpointSubset(in, out, s) +} + +func autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + out.Addresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.Addresses)) + out.NotReadyAddresses = *(*[]v1.EndpointAddress)(unsafe.Pointer(&in.NotReadyAddresses)) + out.Ports = *(*[]v1.EndpointPort)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_EndpointSubset_To_v1_EndpointSubset is an autogenerated conversion function. +func Convert_core_EndpointSubset_To_v1_EndpointSubset(in *core.EndpointSubset, out *v1.EndpointSubset, s conversion.Scope) error { + return autoConvert_core_EndpointSubset_To_v1_EndpointSubset(in, out, s) +} + +func autoConvert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]core.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_v1_Endpoints_To_core_Endpoints is an autogenerated conversion function. +func Convert_v1_Endpoints_To_core_Endpoints(in *v1.Endpoints, out *core.Endpoints, s conversion.Scope) error { + return autoConvert_v1_Endpoints_To_core_Endpoints(in, out, s) +} + +func autoConvert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Subsets = *(*[]v1.EndpointSubset)(unsafe.Pointer(&in.Subsets)) + return nil +} + +// Convert_core_Endpoints_To_v1_Endpoints is an autogenerated conversion function. +func Convert_core_Endpoints_To_v1_Endpoints(in *core.Endpoints, out *v1.Endpoints, s conversion.Scope) error { + return autoConvert_core_Endpoints_To_v1_Endpoints(in, out, s) +} + +func autoConvert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EndpointsList_To_core_EndpointsList is an autogenerated conversion function. +func Convert_v1_EndpointsList_To_core_EndpointsList(in *v1.EndpointsList, out *core.EndpointsList, s conversion.Scope) error { + return autoConvert_v1_EndpointsList_To_core_EndpointsList(in, out, s) +} + +func autoConvert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Endpoints)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EndpointsList_To_v1_EndpointsList is an autogenerated conversion function. +func Convert_core_EndpointsList_To_v1_EndpointsList(in *core.EndpointsList, out *v1.EndpointsList, s conversion.Scope) error { + return autoConvert_core_EndpointsList_To_v1_EndpointsList(in, out, s) +} + +func autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*core.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*core.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_EnvFromSource_To_core_EnvFromSource is an autogenerated conversion function. 
+func Convert_v1_EnvFromSource_To_core_EnvFromSource(in *v1.EnvFromSource, out *core.EnvFromSource, s conversion.Scope) error { + return autoConvert_v1_EnvFromSource_To_core_EnvFromSource(in, out, s) +} + +func autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + out.Prefix = in.Prefix + out.ConfigMapRef = (*v1.ConfigMapEnvSource)(unsafe.Pointer(in.ConfigMapRef)) + out.SecretRef = (*v1.SecretEnvSource)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_EnvFromSource_To_v1_EnvFromSource is an autogenerated conversion function. +func Convert_core_EnvFromSource_To_v1_EnvFromSource(in *core.EnvFromSource, out *v1.EnvFromSource, s conversion.Scope) error { + return autoConvert_core_EnvFromSource_To_v1_EnvFromSource(in, out, s) +} + +func autoConvert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*core.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_v1_EnvVar_To_core_EnvVar is an autogenerated conversion function. +func Convert_v1_EnvVar_To_core_EnvVar(in *v1.EnvVar, out *core.EnvVar, s conversion.Scope) error { + return autoConvert_v1_EnvVar_To_core_EnvVar(in, out, s) +} + +func autoConvert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + out.ValueFrom = (*v1.EnvVarSource)(unsafe.Pointer(in.ValueFrom)) + return nil +} + +// Convert_core_EnvVar_To_v1_EnvVar is an autogenerated conversion function. +func Convert_core_EnvVar_To_v1_EnvVar(in *core.EnvVar, out *v1.EnvVar, s conversion.Scope) error { + return autoConvert_core_EnvVar_To_v1_EnvVar(in, out, s) +} + +func autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*core.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*core.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*core.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*core.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_v1_EnvVarSource_To_core_EnvVarSource is an autogenerated conversion function. +func Convert_v1_EnvVarSource_To_core_EnvVarSource(in *v1.EnvVarSource, out *core.EnvVarSource, s conversion.Scope) error { + return autoConvert_v1_EnvVarSource_To_core_EnvVarSource(in, out, s) +} + +func autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + out.FieldRef = (*v1.ObjectFieldSelector)(unsafe.Pointer(in.FieldRef)) + out.ResourceFieldRef = (*v1.ResourceFieldSelector)(unsafe.Pointer(in.ResourceFieldRef)) + out.ConfigMapKeyRef = (*v1.ConfigMapKeySelector)(unsafe.Pointer(in.ConfigMapKeyRef)) + out.SecretKeyRef = (*v1.SecretKeySelector)(unsafe.Pointer(in.SecretKeyRef)) + return nil +} + +// Convert_core_EnvVarSource_To_v1_EnvVarSource is an autogenerated conversion function. 
+func Convert_core_EnvVarSource_To_v1_EnvVarSource(in *core.EnvVarSource, out *v1.EnvVarSource, s conversion.Scope) error { + return autoConvert_core_EnvVarSource_To_v1_EnvVarSource(in, out, s) +} + +func autoConvert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_v1_EventSource_To_core_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*core.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*core.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_v1_Event_To_core_Event is an autogenerated conversion function. +func Convert_v1_Event_To_core_Event(in *v1.Event, out *core.Event, s conversion.Scope) error { + return autoConvert_v1_Event_To_core_Event(in, out, s) +} + +func autoConvert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.InvolvedObject, &out.InvolvedObject, s); err != nil { + return err + } + out.Reason = in.Reason + out.Message = in.Message + if err := Convert_core_EventSource_To_v1_EventSource(&in.Source, &out.Source, s); err != nil { + return err + } + out.FirstTimestamp = in.FirstTimestamp + out.LastTimestamp = in.LastTimestamp + out.Count = in.Count + out.Type = in.Type + out.EventTime = in.EventTime + out.Series = (*v1.EventSeries)(unsafe.Pointer(in.Series)) + out.Action = in.Action + out.Related = (*v1.ObjectReference)(unsafe.Pointer(in.Related)) + out.ReportingController = in.ReportingController + out.ReportingInstance = in.ReportingInstance + return nil +} + +// Convert_core_Event_To_v1_Event is an autogenerated conversion function. +func Convert_core_Event_To_v1_Event(in *core.Event, out *v1.Event, s conversion.Scope) error { + return autoConvert_core_Event_To_v1_Event(in, out, s) +} + +func autoConvert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_EventList_To_core_EventList is an autogenerated conversion function. +func Convert_v1_EventList_To_core_EventList(in *v1.EventList, out *core.EventList, s conversion.Scope) error { + return autoConvert_v1_EventList_To_core_EventList(in, out, s) +} + +func autoConvert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Event)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_EventList_To_v1_EventList is an autogenerated conversion function. 
+func Convert_core_EventList_To_v1_EventList(in *core.EventList, out *v1.EventList, s conversion.Scope) error { + return autoConvert_core_EventList_To_v1_EventList(in, out, s) +} + +func autoConvert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = core.EventSeriesState(in.State) + return nil +} + +// Convert_v1_EventSeries_To_core_EventSeries is an autogenerated conversion function. +func Convert_v1_EventSeries_To_core_EventSeries(in *v1.EventSeries, out *core.EventSeries, s conversion.Scope) error { + return autoConvert_v1_EventSeries_To_core_EventSeries(in, out, s) +} + +func autoConvert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + out.Count = in.Count + out.LastObservedTime = in.LastObservedTime + out.State = v1.EventSeriesState(in.State) + return nil +} + +// Convert_core_EventSeries_To_v1_EventSeries is an autogenerated conversion function. +func Convert_core_EventSeries_To_v1_EventSeries(in *core.EventSeries, out *v1.EventSeries, s conversion.Scope) error { + return autoConvert_core_EventSeries_To_v1_EventSeries(in, out, s) +} + +func autoConvert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_v1_EventSource_To_core_EventSource is an autogenerated conversion function. +func Convert_v1_EventSource_To_core_EventSource(in *v1.EventSource, out *core.EventSource, s conversion.Scope) error { + return autoConvert_v1_EventSource_To_core_EventSource(in, out, s) +} + +func autoConvert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + out.Component = in.Component + out.Host = in.Host + return nil +} + +// Convert_core_EventSource_To_v1_EventSource is an autogenerated conversion function. +func Convert_core_EventSource_To_v1_EventSource(in *core.EventSource, out *v1.EventSource, s conversion.Scope) error { + return autoConvert_core_EventSource_To_v1_EventSource(in, out, s) +} + +func autoConvert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_ExecAction_To_core_ExecAction is an autogenerated conversion function. +func Convert_v1_ExecAction_To_core_ExecAction(in *v1.ExecAction, out *core.ExecAction, s conversion.Scope) error { + return autoConvert_v1_ExecAction_To_core_ExecAction(in, out, s) +} + +func autoConvert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_ExecAction_To_v1_ExecAction is an autogenerated conversion function. 
+func Convert_core_ExecAction_To_v1_ExecAction(in *core.ExecAction, out *v1.ExecAction, s conversion.Scope) error { + return autoConvert_core_ExecAction_To_v1_ExecAction(in, out, s) +} + +func autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_v1_FCVolumeSource_To_core_FCVolumeSource is an autogenerated conversion function. +func Convert_v1_FCVolumeSource_To_core_FCVolumeSource(in *v1.FCVolumeSource, out *core.FCVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FCVolumeSource_To_core_FCVolumeSource(in, out, s) +} + +func autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + out.TargetWWNs = *(*[]string)(unsafe.Pointer(&in.TargetWWNs)) + out.Lun = (*int32)(unsafe.Pointer(in.Lun)) + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.WWIDs = *(*[]string)(unsafe.Pointer(&in.WWIDs)) + return nil +} + +// Convert_core_FCVolumeSource_To_v1_FCVolumeSource is an autogenerated conversion function. +func Convert_core_FCVolumeSource_To_v1_FCVolumeSource(in *core.FCVolumeSource, out *v1.FCVolumeSource, s conversion.Scope) error { + return autoConvert_core_FCVolumeSource_To_v1_FCVolumeSource(in, out, s) +} + +func autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource is an autogenerated conversion function. +func Convert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in *v1.FlexVolumeSource, out *core.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlexVolumeSource_To_core_FlexVolumeSource(in, out, s) +} + +func autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + out.Driver = in.Driver + out.FSType = in.FSType + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + out.Options = *(*map[string]string)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource is an autogenerated conversion function. +func Convert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in *core.FlexVolumeSource, out *v1.FlexVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlexVolumeSource_To_v1_FlexVolumeSource(in, out, s) +} + +func autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource is an autogenerated conversion function. 
+func Convert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in *v1.FlockerVolumeSource, out *core.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_v1_FlockerVolumeSource_To_core_FlockerVolumeSource(in, out, s) +} + +func autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + out.DatasetName = in.DatasetName + out.DatasetUUID = in.DatasetUUID + return nil +} + +// Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource is an autogenerated conversion function. +func Convert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in *core.FlockerVolumeSource, out *v1.FlockerVolumeSource, s conversion.Scope) error { + return autoConvert_core_FlockerVolumeSource_To_v1_FlockerVolumeSource(in, out, s) +} + +func autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in *v1.GCEPersistentDiskVolumeSource, out *core.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GCEPersistentDiskVolumeSource_To_core_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + out.PDName = in.PDName + out.FSType = in.FSType + out.Partition = in.Partition + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in *core.GCEPersistentDiskVolumeSource, out *v1.GCEPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_GCEPersistentDiskVolumeSource_To_v1_GCEPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource is an autogenerated conversion function. +func Convert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in *v1.GitRepoVolumeSource, out *core.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GitRepoVolumeSource_To_core_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + out.Repository = in.Repository + out.Revision = in.Revision + out.Directory = in.Directory + return nil +} + +// Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource is an autogenerated conversion function. 
+func Convert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in *core.GitRepoVolumeSource, out *v1.GitRepoVolumeSource, s conversion.Scope) error { + return autoConvert_core_GitRepoVolumeSource_To_v1_GitRepoVolumeSource(in, out, s) +} + +func autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in *v1.GlusterfsVolumeSource, out *core.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_v1_GlusterfsVolumeSource_To_core_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + out.EndpointsName = in.EndpointsName + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource is an autogenerated conversion function. +func Convert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in *core.GlusterfsVolumeSource, out *v1.GlusterfsVolumeSource, s conversion.Scope) error { + return autoConvert_core_GlusterfsVolumeSource_To_v1_GlusterfsVolumeSource(in, out, s) +} + +func autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = core.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]core.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_v1_HTTPGetAction_To_core_HTTPGetAction is an autogenerated conversion function. +func Convert_v1_HTTPGetAction_To_core_HTTPGetAction(in *v1.HTTPGetAction, out *core.HTTPGetAction, s conversion.Scope) error { + return autoConvert_v1_HTTPGetAction_To_core_HTTPGetAction(in, out, s) +} + +func autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + out.Path = in.Path + out.Port = in.Port + out.Host = in.Host + out.Scheme = v1.URIScheme(in.Scheme) + out.HTTPHeaders = *(*[]v1.HTTPHeader)(unsafe.Pointer(&in.HTTPHeaders)) + return nil +} + +// Convert_core_HTTPGetAction_To_v1_HTTPGetAction is an autogenerated conversion function. +func Convert_core_HTTPGetAction_To_v1_HTTPGetAction(in *core.HTTPGetAction, out *v1.HTTPGetAction, s conversion.Scope) error { + return autoConvert_core_HTTPGetAction_To_v1_HTTPGetAction(in, out, s) +} + +func autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_HTTPHeader_To_core_HTTPHeader is an autogenerated conversion function. +func Convert_v1_HTTPHeader_To_core_HTTPHeader(in *v1.HTTPHeader, out *core.HTTPHeader, s conversion.Scope) error { + return autoConvert_v1_HTTPHeader_To_core_HTTPHeader(in, out, s) +} + +func autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_HTTPHeader_To_v1_HTTPHeader is an autogenerated conversion function. 
+func Convert_core_HTTPHeader_To_v1_HTTPHeader(in *core.HTTPHeader, out *v1.HTTPHeader, s conversion.Scope) error { + return autoConvert_core_HTTPHeader_To_v1_HTTPHeader(in, out, s) +} + +func autoConvert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + out.Exec = (*core.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*core.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*core.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_v1_Handler_To_core_Handler is an autogenerated conversion function. +func Convert_v1_Handler_To_core_Handler(in *v1.Handler, out *core.Handler, s conversion.Scope) error { + return autoConvert_v1_Handler_To_core_Handler(in, out, s) +} + +func autoConvert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + out.Exec = (*v1.ExecAction)(unsafe.Pointer(in.Exec)) + out.HTTPGet = (*v1.HTTPGetAction)(unsafe.Pointer(in.HTTPGet)) + out.TCPSocket = (*v1.TCPSocketAction)(unsafe.Pointer(in.TCPSocket)) + return nil +} + +// Convert_core_Handler_To_v1_Handler is an autogenerated conversion function. +func Convert_core_Handler_To_v1_Handler(in *core.Handler, out *v1.Handler, s conversion.Scope) error { + return autoConvert_core_Handler_To_v1_Handler(in, out, s) +} + +func autoConvert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_v1_HostAlias_To_core_HostAlias is an autogenerated conversion function. +func Convert_v1_HostAlias_To_core_HostAlias(in *v1.HostAlias, out *core.HostAlias, s conversion.Scope) error { + return autoConvert_v1_HostAlias_To_core_HostAlias(in, out, s) +} + +func autoConvert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + out.IP = in.IP + out.Hostnames = *(*[]string)(unsafe.Pointer(&in.Hostnames)) + return nil +} + +// Convert_core_HostAlias_To_v1_HostAlias is an autogenerated conversion function. +func Convert_core_HostAlias_To_v1_HostAlias(in *core.HostAlias, out *v1.HostAlias, s conversion.Scope) error { + return autoConvert_core_HostAlias_To_v1_HostAlias(in, out, s) +} + +func autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*core.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource is an autogenerated conversion function. +func Convert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in *v1.HostPathVolumeSource, out *core.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_v1_HostPathVolumeSource_To_core_HostPathVolumeSource(in, out, s) +} + +func autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + out.Path = in.Path + out.Type = (*v1.HostPathType)(unsafe.Pointer(in.Type)) + return nil +} + +// Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource is an autogenerated conversion function. 
+func Convert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in *core.HostPathVolumeSource, out *v1.HostPathVolumeSource, s conversion.Scope) error { + return autoConvert_core_HostPathVolumeSource_To_v1_HostPathVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in *v1.ISCSIPersistentVolumeSource, out *core.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIPersistentVolumeSource_To_core_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in *core.ISCSIPersistentVolumeSource, out *v1.ISCSIPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIPersistentVolumeSource_To_v1_ISCSIPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in *v1.ISCSIVolumeSource, out *core.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ISCSIVolumeSource_To_core_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + out.TargetPortal = in.TargetPortal + out.IQN = in.IQN + out.Lun = in.Lun + out.ISCSIInterface = in.ISCSIInterface + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.Portals = *(*[]string)(unsafe.Pointer(&in.Portals)) + out.DiscoveryCHAPAuth = in.DiscoveryCHAPAuth + out.SessionCHAPAuth = in.SessionCHAPAuth + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.InitiatorName = (*string)(unsafe.Pointer(in.InitiatorName)) + return nil +} + +// Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource is an autogenerated conversion function. +func Convert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in *core.ISCSIVolumeSource, out *v1.ISCSIVolumeSource, s conversion.Scope) error { + return autoConvert_core_ISCSIVolumeSource_To_v1_ISCSIVolumeSource(in, out, s) +} + +func autoConvert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_v1_KeyToPath_To_core_KeyToPath is an autogenerated conversion function. +func Convert_v1_KeyToPath_To_core_KeyToPath(in *v1.KeyToPath, out *core.KeyToPath, s conversion.Scope) error { + return autoConvert_v1_KeyToPath_To_core_KeyToPath(in, out, s) +} + +func autoConvert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + out.Key = in.Key + out.Path = in.Path + out.Mode = (*int32)(unsafe.Pointer(in.Mode)) + return nil +} + +// Convert_core_KeyToPath_To_v1_KeyToPath is an autogenerated conversion function. +func Convert_core_KeyToPath_To_v1_KeyToPath(in *core.KeyToPath, out *v1.KeyToPath, s conversion.Scope) error { + return autoConvert_core_KeyToPath_To_v1_KeyToPath(in, out, s) +} + +func autoConvert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + out.PostStart = (*core.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*core.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_v1_Lifecycle_To_core_Lifecycle is an autogenerated conversion function. +func Convert_v1_Lifecycle_To_core_Lifecycle(in *v1.Lifecycle, out *core.Lifecycle, s conversion.Scope) error { + return autoConvert_v1_Lifecycle_To_core_Lifecycle(in, out, s) +} + +func autoConvert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + out.PostStart = (*v1.Handler)(unsafe.Pointer(in.PostStart)) + out.PreStop = (*v1.Handler)(unsafe.Pointer(in.PreStop)) + return nil +} + +// Convert_core_Lifecycle_To_v1_Lifecycle is an autogenerated conversion function. 
+func Convert_core_Lifecycle_To_v1_Lifecycle(in *core.Lifecycle, out *v1.Lifecycle, s conversion.Scope) error { + return autoConvert_core_Lifecycle_To_v1_Lifecycle(in, out, s) +} + +func autoConvert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1_LimitRange_To_core_LimitRange is an autogenerated conversion function. +func Convert_v1_LimitRange_To_core_LimitRange(in *v1.LimitRange, out *core.LimitRange, s conversion.Scope) error { + return autoConvert_v1_LimitRange_To_core_LimitRange(in, out, s) +} + +func autoConvert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_core_LimitRange_To_v1_LimitRange is an autogenerated conversion function. +func Convert_core_LimitRange_To_v1_LimitRange(in *core.LimitRange, out *v1.LimitRange, s conversion.Scope) error { + return autoConvert_core_LimitRange_To_v1_LimitRange(in, out, s) +} + +func autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + out.Type = core.LimitType(in.Type) + out.Max = *(*core.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*core.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*core.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*core.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*core.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_v1_LimitRangeItem_To_core_LimitRangeItem is an autogenerated conversion function. +func Convert_v1_LimitRangeItem_To_core_LimitRangeItem(in *v1.LimitRangeItem, out *core.LimitRangeItem, s conversion.Scope) error { + return autoConvert_v1_LimitRangeItem_To_core_LimitRangeItem(in, out, s) +} + +func autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + out.Type = v1.LimitType(in.Type) + out.Max = *(*v1.ResourceList)(unsafe.Pointer(&in.Max)) + out.Min = *(*v1.ResourceList)(unsafe.Pointer(&in.Min)) + out.Default = *(*v1.ResourceList)(unsafe.Pointer(&in.Default)) + out.DefaultRequest = *(*v1.ResourceList)(unsafe.Pointer(&in.DefaultRequest)) + out.MaxLimitRequestRatio = *(*v1.ResourceList)(unsafe.Pointer(&in.MaxLimitRequestRatio)) + return nil +} + +// Convert_core_LimitRangeItem_To_v1_LimitRangeItem is an autogenerated conversion function. +func Convert_core_LimitRangeItem_To_v1_LimitRangeItem(in *core.LimitRangeItem, out *v1.LimitRangeItem, s conversion.Scope) error { + return autoConvert_core_LimitRangeItem_To_v1_LimitRangeItem(in, out, s) +} + +func autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_LimitRangeList_To_core_LimitRangeList is an autogenerated conversion function. 
+func Convert_v1_LimitRangeList_To_core_LimitRangeList(in *v1.LimitRangeList, out *core.LimitRangeList, s conversion.Scope) error { + return autoConvert_v1_LimitRangeList_To_core_LimitRangeList(in, out, s) +} + +func autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.LimitRange)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_LimitRangeList_To_v1_LimitRangeList is an autogenerated conversion function. +func Convert_core_LimitRangeList_To_v1_LimitRangeList(in *core.LimitRangeList, out *v1.LimitRangeList, s conversion.Scope) error { + return autoConvert_core_LimitRangeList_To_v1_LimitRangeList(in, out, s) +} + +func autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]core.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec is an autogenerated conversion function. +func Convert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in *v1.LimitRangeSpec, out *core.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_v1_LimitRangeSpec_To_core_LimitRangeSpec(in, out, s) +} + +func autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + out.Limits = *(*[]v1.LimitRangeItem)(unsafe.Pointer(&in.Limits)) + return nil +} + +// Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec is an autogenerated conversion function. +func Convert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in *core.LimitRangeSpec, out *v1.LimitRangeSpec, s conversion.Scope) error { + return autoConvert_core_LimitRangeSpec_To_v1_LimitRangeSpec(in, out, s) +} + +func autoConvert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.Object, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_List_To_core_List is an autogenerated conversion function. +func Convert_v1_List_To_core_List(in *v1.List, out *core.List, s conversion.Scope) error { + return autoConvert_v1_List_To_core_List(in, out, s) +} + +func autoConvert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_List_To_v1_List is an autogenerated conversion function. 
+func Convert_core_List_To_v1_List(in *core.List, out *v1.List, s conversion.Scope) error { + return autoConvert_core_List_To_v1_List(in, out, s) +} + +func autoConvert_v1_ListOptions_To_core_ListOptions(in *v1.ListOptions, out *core.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_string_To_labels_Selector(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_string_To_fields_Selector(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_v1_ListOptions_To_core_ListOptions is an autogenerated conversion function. +func Convert_v1_ListOptions_To_core_ListOptions(in *v1.ListOptions, out *core.ListOptions, s conversion.Scope) error { + return autoConvert_v1_ListOptions_To_core_ListOptions(in, out, s) +} + +func autoConvert_core_ListOptions_To_v1_ListOptions(in *core.ListOptions, out *v1.ListOptions, s conversion.Scope) error { + if err := meta_v1.Convert_labels_Selector_To_string(&in.LabelSelector, &out.LabelSelector, s); err != nil { + return err + } + if err := meta_v1.Convert_fields_Selector_To_string(&in.FieldSelector, &out.FieldSelector, s); err != nil { + return err + } + out.IncludeUninitialized = in.IncludeUninitialized + out.Watch = in.Watch + out.ResourceVersion = in.ResourceVersion + out.TimeoutSeconds = (*int64)(unsafe.Pointer(in.TimeoutSeconds)) + return nil +} + +// Convert_core_ListOptions_To_v1_ListOptions is an autogenerated conversion function. +func Convert_core_ListOptions_To_v1_ListOptions(in *core.ListOptions, out *v1.ListOptions, s conversion.Scope) error { + return autoConvert_core_ListOptions_To_v1_ListOptions(in, out, s) +} + +func autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress is an autogenerated conversion function. +func Convert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in *v1.LoadBalancerIngress, out *core.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerIngress_To_core_LoadBalancerIngress(in, out, s) +} + +func autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + out.IP = in.IP + out.Hostname = in.Hostname + return nil +} + +// Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress is an autogenerated conversion function. +func Convert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in *core.LoadBalancerIngress, out *v1.LoadBalancerIngress, s conversion.Scope) error { + return autoConvert_core_LoadBalancerIngress_To_v1_LoadBalancerIngress(in, out, s) +} + +func autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]core.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus is an autogenerated conversion function. 
+func Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in *v1.LoadBalancerStatus, out *core.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(in, out, s) +} + +func autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + out.Ingress = *(*[]v1.LoadBalancerIngress)(unsafe.Pointer(&in.Ingress)) + return nil +} + +// Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus is an autogenerated conversion function. +func Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in *core.LoadBalancerStatus, out *v1.LoadBalancerStatus, s conversion.Scope) error { + return autoConvert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(in, out, s) +} + +func autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_v1_LocalObjectReference_To_core_LocalObjectReference is an autogenerated conversion function. +func Convert_v1_LocalObjectReference_To_core_LocalObjectReference(in *v1.LocalObjectReference, out *core.LocalObjectReference, s conversion.Scope) error { + return autoConvert_v1_LocalObjectReference_To_core_LocalObjectReference(in, out, s) +} + +func autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + out.Name = in.Name + return nil +} + +// Convert_core_LocalObjectReference_To_v1_LocalObjectReference is an autogenerated conversion function. +func Convert_core_LocalObjectReference_To_v1_LocalObjectReference(in *core.LocalObjectReference, out *v1.LocalObjectReference, s conversion.Scope) error { + return autoConvert_core_LocalObjectReference_To_v1_LocalObjectReference(in, out, s) +} + +func autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource is an autogenerated conversion function. +func Convert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in *v1.LocalVolumeSource, out *core.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_v1_LocalVolumeSource_To_core_LocalVolumeSource(in, out, s) +} + +func autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource is an autogenerated conversion function. +func Convert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in *core.LocalVolumeSource, out *v1.LocalVolumeSource, s conversion.Scope) error { + return autoConvert_core_LocalVolumeSource_To_v1_LocalVolumeSource(in, out, s) +} + +func autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource is an autogenerated conversion function. 
+func Convert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in *v1.NFSVolumeSource, out *core.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_NFSVolumeSource_To_core_NFSVolumeSource(in, out, s) +} + +func autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + out.Server = in.Server + out.Path = in.Path + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource is an autogenerated conversion function. +func Convert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in *core.NFSVolumeSource, out *v1.NFSVolumeSource, s conversion.Scope) error { + return autoConvert_core_NFSVolumeSource_To_v1_NFSVolumeSource(in, out, s) +} + +func autoConvert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NamespaceSpec_To_core_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NamespaceStatus_To_core_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Namespace_To_core_Namespace is an autogenerated conversion function. +func Convert_v1_Namespace_To_core_Namespace(in *v1.Namespace, out *core.Namespace, s conversion.Scope) error { + return autoConvert_v1_Namespace_To_core_Namespace(in, out, s) +} + +func autoConvert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NamespaceSpec_To_v1_NamespaceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NamespaceStatus_To_v1_NamespaceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Namespace_To_v1_Namespace is an autogenerated conversion function. +func Convert_core_Namespace_To_v1_Namespace(in *core.Namespace, out *v1.Namespace, s conversion.Scope) error { + return autoConvert_core_Namespace_To_v1_Namespace(in, out, s) +} + +func autoConvert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NamespaceList_To_core_NamespaceList is an autogenerated conversion function. +func Convert_v1_NamespaceList_To_core_NamespaceList(in *v1.NamespaceList, out *core.NamespaceList, s conversion.Scope) error { + return autoConvert_v1_NamespaceList_To_core_NamespaceList(in, out, s) +} + +func autoConvert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Namespace)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NamespaceList_To_v1_NamespaceList is an autogenerated conversion function. +func Convert_core_NamespaceList_To_v1_NamespaceList(in *core.NamespaceList, out *v1.NamespaceList, s conversion.Scope) error { + return autoConvert_core_NamespaceList_To_v1_NamespaceList(in, out, s) +} + +func autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]core.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_v1_NamespaceSpec_To_core_NamespaceSpec is an autogenerated conversion function. 
+func Convert_v1_NamespaceSpec_To_core_NamespaceSpec(in *v1.NamespaceSpec, out *core.NamespaceSpec, s conversion.Scope) error { + return autoConvert_v1_NamespaceSpec_To_core_NamespaceSpec(in, out, s) +} + +func autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + out.Finalizers = *(*[]v1.FinalizerName)(unsafe.Pointer(&in.Finalizers)) + return nil +} + +// Convert_core_NamespaceSpec_To_v1_NamespaceSpec is an autogenerated conversion function. +func Convert_core_NamespaceSpec_To_v1_NamespaceSpec(in *core.NamespaceSpec, out *v1.NamespaceSpec, s conversion.Scope) error { + return autoConvert_core_NamespaceSpec_To_v1_NamespaceSpec(in, out, s) +} + +func autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + out.Phase = core.NamespacePhase(in.Phase) + return nil +} + +// Convert_v1_NamespaceStatus_To_core_NamespaceStatus is an autogenerated conversion function. +func Convert_v1_NamespaceStatus_To_core_NamespaceStatus(in *v1.NamespaceStatus, out *core.NamespaceStatus, s conversion.Scope) error { + return autoConvert_v1_NamespaceStatus_To_core_NamespaceStatus(in, out, s) +} + +func autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + out.Phase = v1.NamespacePhase(in.Phase) + return nil +} + +// Convert_core_NamespaceStatus_To_v1_NamespaceStatus is an autogenerated conversion function. +func Convert_core_NamespaceStatus_To_v1_NamespaceStatus(in *core.NamespaceStatus, out *v1.NamespaceStatus, s conversion.Scope) error { + return autoConvert_core_NamespaceStatus_To_v1_NamespaceStatus(in, out, s) +} + +func autoConvert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_NodeSpec_To_core_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_NodeStatus_To_core_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Node_To_core_Node is an autogenerated conversion function. +func Convert_v1_Node_To_core_Node(in *v1.Node, out *core.Node, s conversion.Scope) error { + return autoConvert_v1_Node_To_core_Node(in, out, s) +} + +func autoConvert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_NodeSpec_To_v1_NodeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_NodeStatus_To_v1_NodeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Node_To_v1_Node is an autogenerated conversion function. +func Convert_core_Node_To_v1_Node(in *core.Node, out *v1.Node, s conversion.Scope) error { + return autoConvert_core_Node_To_v1_Node(in, out, s) +} + +func autoConvert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + out.Type = core.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_v1_NodeAddress_To_core_NodeAddress is an autogenerated conversion function. 
+func Convert_v1_NodeAddress_To_core_NodeAddress(in *v1.NodeAddress, out *core.NodeAddress, s conversion.Scope) error { + return autoConvert_v1_NodeAddress_To_core_NodeAddress(in, out, s) +} + +func autoConvert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + out.Type = v1.NodeAddressType(in.Type) + out.Address = in.Address + return nil +} + +// Convert_core_NodeAddress_To_v1_NodeAddress is an autogenerated conversion function. +func Convert_core_NodeAddress_To_v1_NodeAddress(in *core.NodeAddress, out *v1.NodeAddress, s conversion.Scope) error { + return autoConvert_core_NodeAddress_To_v1_NodeAddress(in, out, s) +} + +func autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*core.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_NodeAffinity_To_core_NodeAffinity is an autogenerated conversion function. +func Convert_v1_NodeAffinity_To_core_NodeAffinity(in *v1.NodeAffinity, out *core.NodeAffinity, s conversion.Scope) error { + return autoConvert_v1_NodeAffinity_To_core_NodeAffinity(in, out, s) +} + +func autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = (*v1.NodeSelector)(unsafe.Pointer(in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PreferredSchedulingTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_NodeAffinity_To_v1_NodeAffinity is an autogenerated conversion function. +func Convert_core_NodeAffinity_To_v1_NodeAffinity(in *core.NodeAffinity, out *v1.NodeAffinity, s conversion.Scope) error { + return autoConvert_core_NodeAffinity_To_v1_NodeAffinity(in, out, s) +} + +func autoConvert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + out.Type = core.NodeConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_NodeCondition_To_core_NodeCondition is an autogenerated conversion function. +func Convert_v1_NodeCondition_To_core_NodeCondition(in *v1.NodeCondition, out *core.NodeCondition, s conversion.Scope) error { + return autoConvert_v1_NodeCondition_To_core_NodeCondition(in, out, s) +} + +func autoConvert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + out.Type = v1.NodeConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastHeartbeatTime = in.LastHeartbeatTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_NodeCondition_To_v1_NodeCondition is an autogenerated conversion function. 
+func Convert_core_NodeCondition_To_v1_NodeCondition(in *core.NodeCondition, out *v1.NodeCondition, s conversion.Scope) error { + return autoConvert_core_NodeCondition_To_v1_NodeCondition(in, out, s) +} + +func autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + out.ConfigMapRef = (*core.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_v1_NodeConfigSource_To_core_NodeConfigSource is an autogenerated conversion function. +func Convert_v1_NodeConfigSource_To_core_NodeConfigSource(in *v1.NodeConfigSource, out *core.NodeConfigSource, s conversion.Scope) error { + return autoConvert_v1_NodeConfigSource_To_core_NodeConfigSource(in, out, s) +} + +func autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + out.ConfigMapRef = (*v1.ObjectReference)(unsafe.Pointer(in.ConfigMapRef)) + return nil +} + +// Convert_core_NodeConfigSource_To_v1_NodeConfigSource is an autogenerated conversion function. +func Convert_core_NodeConfigSource_To_v1_NodeConfigSource(in *core.NodeConfigSource, out *v1.NodeConfigSource, s conversion.Scope) error { + return autoConvert_core_NodeConfigSource_To_v1_NodeConfigSource(in, out, s) +} + +func autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_v1_DaemonEndpoint_To_core_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in *v1.NodeDaemonEndpoints, out *core.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + if err := Convert_core_DaemonEndpoint_To_v1_DaemonEndpoint(&in.KubeletEndpoint, &out.KubeletEndpoint, s); err != nil { + return err + } + return nil +} + +// Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints is an autogenerated conversion function. +func Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in *core.NodeDaemonEndpoints, out *v1.NodeDaemonEndpoints, s conversion.Scope) error { + return autoConvert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(in, out, s) +} + +func autoConvert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_NodeList_To_core_NodeList is an autogenerated conversion function. +func Convert_v1_NodeList_To_core_NodeList(in *v1.NodeList, out *core.NodeList, s conversion.Scope) error { + return autoConvert_v1_NodeList_To_core_NodeList(in, out, s) +} + +func autoConvert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.Node)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_NodeList_To_v1_NodeList is an autogenerated conversion function. 
+func Convert_core_NodeList_To_v1_NodeList(in *core.NodeList, out *v1.NodeList, s conversion.Scope) error { + return autoConvert_core_NodeList_To_v1_NodeList(in, out, s) +} + +func autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions is an autogenerated conversion function. +func Convert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in *v1.NodeProxyOptions, out *core.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_v1_NodeProxyOptions_To_core_NodeProxyOptions(in, out, s) +} + +func autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions is an autogenerated conversion function. +func Convert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in *core.NodeProxyOptions, out *v1.NodeProxyOptions, s conversion.Scope) error { + return autoConvert_core_NodeProxyOptions_To_v1_NodeProxyOptions(in, out, s) +} + +func autoConvert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_v1_NodeResources_To_core_NodeResources is an autogenerated conversion function. +func Convert_v1_NodeResources_To_core_NodeResources(in *v1.NodeResources, out *core.NodeResources, s conversion.Scope) error { + return autoConvert_v1_NodeResources_To_core_NodeResources(in, out, s) +} + +func autoConvert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + return nil +} + +// Convert_core_NodeResources_To_v1_NodeResources is an autogenerated conversion function. +func Convert_core_NodeResources_To_v1_NodeResources(in *core.NodeResources, out *v1.NodeResources, s conversion.Scope) error { + return autoConvert_core_NodeResources_To_v1_NodeResources(in, out, s) +} + +func autoConvert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]core.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_v1_NodeSelector_To_core_NodeSelector is an autogenerated conversion function. +func Convert_v1_NodeSelector_To_core_NodeSelector(in *v1.NodeSelector, out *core.NodeSelector, s conversion.Scope) error { + return autoConvert_v1_NodeSelector_To_core_NodeSelector(in, out, s) +} + +func autoConvert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + out.NodeSelectorTerms = *(*[]v1.NodeSelectorTerm)(unsafe.Pointer(&in.NodeSelectorTerms)) + return nil +} + +// Convert_core_NodeSelector_To_v1_NodeSelector is an autogenerated conversion function. 
+func Convert_core_NodeSelector_To_v1_NodeSelector(in *core.NodeSelector, out *v1.NodeSelector, s conversion.Scope) error { + return autoConvert_core_NodeSelector_To_v1_NodeSelector(in, out, s) +} + +func autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in *v1.NodeSelectorRequirement, out *core.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorRequirement_To_core_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.NodeSelectorOperator(in.Operator) + out.Values = *(*[]string)(unsafe.Pointer(&in.Values)) + return nil +} + +// Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement is an autogenerated conversion function. +func Convert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in *core.NodeSelectorRequirement, out *v1.NodeSelectorRequirement, s conversion.Scope) error { + return autoConvert_core_NodeSelectorRequirement_To_v1_NodeSelectorRequirement(in, out, s) +} + +func autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]core.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm is an autogenerated conversion function. +func Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in *v1.NodeSelectorTerm, out *core.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(in, out, s) +} + +func autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + out.MatchExpressions = *(*[]v1.NodeSelectorRequirement)(unsafe.Pointer(&in.MatchExpressions)) + return nil +} + +// Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm is an autogenerated conversion function. +func Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in *core.NodeSelectorTerm, out *v1.NodeSelectorTerm, s conversion.Scope) error { + return autoConvert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(in, out, s) +} + +func autoConvert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]core.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*core.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + return nil +} + +// Convert_v1_NodeSpec_To_core_NodeSpec is an autogenerated conversion function. 
+func Convert_v1_NodeSpec_To_core_NodeSpec(in *v1.NodeSpec, out *core.NodeSpec, s conversion.Scope) error { + return autoConvert_v1_NodeSpec_To_core_NodeSpec(in, out, s) +} + +func autoConvert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + out.PodCIDR = in.PodCIDR + out.ExternalID = in.ExternalID + out.ProviderID = in.ProviderID + out.Unschedulable = in.Unschedulable + out.Taints = *(*[]v1.Taint)(unsafe.Pointer(&in.Taints)) + out.ConfigSource = (*v1.NodeConfigSource)(unsafe.Pointer(in.ConfigSource)) + return nil +} + +// Convert_core_NodeSpec_To_v1_NodeSpec is an autogenerated conversion function. +func Convert_core_NodeSpec_To_v1_NodeSpec(in *core.NodeSpec, out *v1.NodeSpec, s conversion.Scope) error { + return autoConvert_core_NodeSpec_To_v1_NodeSpec(in, out, s) +} + +func autoConvert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*core.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = core.NodePhase(in.Phase) + out.Conditions = *(*[]core.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]core.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_v1_NodeDaemonEndpoints_To_core_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]core.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]core.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]core.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_v1_NodeStatus_To_core_NodeStatus is an autogenerated conversion function. +func Convert_v1_NodeStatus_To_core_NodeStatus(in *v1.NodeStatus, out *core.NodeStatus, s conversion.Scope) error { + return autoConvert_v1_NodeStatus_To_core_NodeStatus(in, out, s) +} + +func autoConvert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Allocatable = *(*v1.ResourceList)(unsafe.Pointer(&in.Allocatable)) + out.Phase = v1.NodePhase(in.Phase) + out.Conditions = *(*[]v1.NodeCondition)(unsafe.Pointer(&in.Conditions)) + out.Addresses = *(*[]v1.NodeAddress)(unsafe.Pointer(&in.Addresses)) + if err := Convert_core_NodeDaemonEndpoints_To_v1_NodeDaemonEndpoints(&in.DaemonEndpoints, &out.DaemonEndpoints, s); err != nil { + return err + } + if err := Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(&in.NodeInfo, &out.NodeInfo, s); err != nil { + return err + } + out.Images = *(*[]v1.ContainerImage)(unsafe.Pointer(&in.Images)) + out.VolumesInUse = *(*[]v1.UniqueVolumeName)(unsafe.Pointer(&in.VolumesInUse)) + out.VolumesAttached = *(*[]v1.AttachedVolume)(unsafe.Pointer(&in.VolumesAttached)) + return nil +} + +// Convert_core_NodeStatus_To_v1_NodeStatus is an autogenerated conversion function. 
+func Convert_core_NodeStatus_To_v1_NodeStatus(in *core.NodeStatus, out *v1.NodeStatus, s conversion.Scope) error { + return autoConvert_core_NodeStatus_To_v1_NodeStatus(in, out, s) +} + +func autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo is an autogenerated conversion function. +func Convert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in *v1.NodeSystemInfo, out *core.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_v1_NodeSystemInfo_To_core_NodeSystemInfo(in, out, s) +} + +func autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + out.MachineID = in.MachineID + out.SystemUUID = in.SystemUUID + out.BootID = in.BootID + out.KernelVersion = in.KernelVersion + out.OSImage = in.OSImage + out.ContainerRuntimeVersion = in.ContainerRuntimeVersion + out.KubeletVersion = in.KubeletVersion + out.KubeProxyVersion = in.KubeProxyVersion + out.OperatingSystem = in.OperatingSystem + out.Architecture = in.Architecture + return nil +} + +// Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo is an autogenerated conversion function. +func Convert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in *core.NodeSystemInfo, out *v1.NodeSystemInfo, s conversion.Scope) error { + return autoConvert_core_NodeSystemInfo_To_v1_NodeSystemInfo(in, out, s) +} + +func autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector is an autogenerated conversion function. +func Convert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in *v1.ObjectFieldSelector, out *core.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ObjectFieldSelector_To_core_ObjectFieldSelector(in, out, s) +} + +func autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + out.APIVersion = in.APIVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector is an autogenerated conversion function. 
+func Convert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in *core.ObjectFieldSelector, out *v1.ObjectFieldSelector, s conversion.Scope) error { + return autoConvert_core_ObjectFieldSelector_To_v1_ObjectFieldSelector(in, out, s) +} + +func autoConvert_v1_ObjectMeta_To_core_ObjectMeta(in *v1.ObjectMeta, out *core.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_v1_ObjectMeta_To_core_ObjectMeta is an autogenerated conversion function. +func Convert_v1_ObjectMeta_To_core_ObjectMeta(in *v1.ObjectMeta, out *core.ObjectMeta, s conversion.Scope) error { + return autoConvert_v1_ObjectMeta_To_core_ObjectMeta(in, out, s) +} + +func autoConvert_core_ObjectMeta_To_v1_ObjectMeta(in *core.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { + out.Name = in.Name + out.GenerateName = in.GenerateName + out.Namespace = in.Namespace + out.SelfLink = in.SelfLink + out.UID = types.UID(in.UID) + out.ResourceVersion = in.ResourceVersion + out.Generation = in.Generation + out.CreationTimestamp = in.CreationTimestamp + out.DeletionTimestamp = (*meta_v1.Time)(unsafe.Pointer(in.DeletionTimestamp)) + out.DeletionGracePeriodSeconds = (*int64)(unsafe.Pointer(in.DeletionGracePeriodSeconds)) + out.Labels = *(*map[string]string)(unsafe.Pointer(&in.Labels)) + out.Annotations = *(*map[string]string)(unsafe.Pointer(&in.Annotations)) + out.OwnerReferences = *(*[]meta_v1.OwnerReference)(unsafe.Pointer(&in.OwnerReferences)) + out.Initializers = (*meta_v1.Initializers)(unsafe.Pointer(in.Initializers)) + out.Finalizers = *(*[]string)(unsafe.Pointer(&in.Finalizers)) + out.ClusterName = in.ClusterName + return nil +} + +// Convert_core_ObjectMeta_To_v1_ObjectMeta is an autogenerated conversion function. +func Convert_core_ObjectMeta_To_v1_ObjectMeta(in *core.ObjectMeta, out *v1.ObjectMeta, s conversion.Scope) error { + return autoConvert_core_ObjectMeta_To_v1_ObjectMeta(in, out, s) +} + +func autoConvert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_v1_ObjectReference_To_core_ObjectReference is an autogenerated conversion function. 
+func Convert_v1_ObjectReference_To_core_ObjectReference(in *v1.ObjectReference, out *core.ObjectReference, s conversion.Scope) error { + return autoConvert_v1_ObjectReference_To_core_ObjectReference(in, out, s) +} + +func autoConvert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + out.Kind = in.Kind + out.Namespace = in.Namespace + out.Name = in.Name + out.UID = types.UID(in.UID) + out.APIVersion = in.APIVersion + out.ResourceVersion = in.ResourceVersion + out.FieldPath = in.FieldPath + return nil +} + +// Convert_core_ObjectReference_To_v1_ObjectReference is an autogenerated conversion function. +func Convert_core_ObjectReference_To_v1_ObjectReference(in *core.ObjectReference, out *v1.ObjectReference, s conversion.Scope) error { + return autoConvert_core_ObjectReference_To_v1_ObjectReference(in, out, s) +} + +func autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolume_To_core_PersistentVolume is an autogenerated conversion function. +func Convert_v1_PersistentVolume_To_core_PersistentVolume(in *v1.PersistentVolume, out *core.PersistentVolume, s conversion.Scope) error { + return autoConvert_v1_PersistentVolume_To_core_PersistentVolume(in, out, s) +} + +func autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolume_To_v1_PersistentVolume is an autogenerated conversion function. +func Convert_core_PersistentVolume_To_v1_PersistentVolume(in *core.PersistentVolume, out *v1.PersistentVolume, s conversion.Scope) error { + return autoConvert_core_PersistentVolume_To_v1_PersistentVolume(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in *v1.PersistentVolumeClaim, out *core.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaim_To_core_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in *core.PersistentVolumeClaim, out *v1.PersistentVolumeClaim, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaim_To_v1_PersistentVolumeClaim(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = core.PersistentVolumeClaimConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in *v1.PersistentVolumeClaimCondition, out *core.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimCondition_To_core_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + out.Type = v1.PersistentVolumeClaimConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in *core.PersistentVolumeClaimCondition, out *v1.PersistentVolumeClaimCondition, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimCondition_To_v1_PersistentVolumeClaimCondition(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in *v1.PersistentVolumeClaimList, out *core.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimList_To_core_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.PersistentVolumeClaim)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in *core.PersistentVolumeClaimList, out *v1.PersistentVolumeClaimList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimList_To_v1_PersistentVolumeClaimList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_v1_ResourceRequirements_To_core_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in *v1.PersistentVolumeClaimSpec, out *core.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimSpec_To_core_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Selector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.Selector)) + if err := Convert_core_ResourceRequirements_To_v1_ResourceRequirements(&in.Resources, &out.Resources, s); err != nil { + return err + } + out.VolumeName = in.VolumeName + out.StorageClassName = (*string)(unsafe.Pointer(in.StorageClassName)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in *core.PersistentVolumeClaimSpec, out *v1.PersistentVolumeClaimSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimSpec_To_v1_PersistentVolumeClaimSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]core.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in *v1.PersistentVolumeClaimStatus, out *core.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimStatus_To_core_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumeClaimPhase(in.Phase) + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + out.Conditions = *(*[]v1.PersistentVolumeClaimCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in *core.PersistentVolumeClaimStatus, out *v1.PersistentVolumeClaimStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimStatus_To_v1_PersistentVolumeClaimStatus(in, out, s) +} + +func autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in *v1.PersistentVolumeClaimVolumeSource, out *core.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeClaimVolumeSource_To_core_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + out.ClaimName = in.ClaimName + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in *core.PersistentVolumeClaimVolumeSource, out *v1.PersistentVolumeClaimVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeClaimVolumeSource_To_v1_PersistentVolumeClaimVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_v1_PersistentVolume_To_core_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList is an autogenerated conversion function. +func Convert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in *v1.PersistentVolumeList, out *core.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeList_To_core_PersistentVolumeList(in, out, s) +} + +func autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PersistentVolume, len(*in)) + for i := range *in { + if err := Convert_core_PersistentVolume_To_v1_PersistentVolume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList is an autogenerated conversion function. +func Convert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in *core.PersistentVolumeList, out *v1.PersistentVolumeList, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeList_To_v1_PersistentVolumeList(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*core.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.ISCSI = (*core.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.AzureFile = (*core.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = 
(*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*core.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*core.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*core.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} + +// Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in *v1.PersistentVolumeSource, out *core.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(in, out, s) +} + +func autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.RBD = (*v1.RBDPersistentVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.ISCSI = (*v1.ISCSIPersistentVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSPersistentVolumeSource)(unsafe.Pointer(in.CephFS)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.AzureFile = (*v1.AzureFilePersistentVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOPersistentVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.Local = (*v1.LocalVolumeSource)(unsafe.Pointer(in.Local)) + out.StorageOS = (*v1.StorageOSPersistentVolumeSource)(unsafe.Pointer(in.StorageOS)) + out.CSI = (*v1.CSIPersistentVolumeSource)(unsafe.Pointer(in.CSI)) + return nil +} + +// Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource is an autogenerated conversion function. 
+func Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *core.PersistentVolumeSource, out *v1.PersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*core.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_v1_PersistentVolumeSource_To_core_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]core.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*core.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = core.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*core.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in *v1.PersistentVolumeSpec, out *core.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeSpec_To_core_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + out.Capacity = *(*v1.ResourceList)(unsafe.Pointer(&in.Capacity)) + if err := Convert_core_PersistentVolumeSource_To_v1_PersistentVolumeSource(&in.PersistentVolumeSource, &out.PersistentVolumeSource, s); err != nil { + return err + } + out.AccessModes = *(*[]v1.PersistentVolumeAccessMode)(unsafe.Pointer(&in.AccessModes)) + out.ClaimRef = (*v1.ObjectReference)(unsafe.Pointer(in.ClaimRef)) + out.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimPolicy(in.PersistentVolumeReclaimPolicy) + out.StorageClassName = in.StorageClassName + out.MountOptions = *(*[]string)(unsafe.Pointer(&in.MountOptions)) + out.VolumeMode = (*v1.PersistentVolumeMode)(unsafe.Pointer(in.VolumeMode)) + return nil +} + +// Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec is an autogenerated conversion function. +func Convert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in *core.PersistentVolumeSpec, out *v1.PersistentVolumeSpec, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeSpec_To_v1_PersistentVolumeSpec(in, out, s) +} + +func autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = core.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus is an autogenerated conversion function. 
+func Convert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in *v1.PersistentVolumeStatus, out *core.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_v1_PersistentVolumeStatus_To_core_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + out.Phase = v1.PersistentVolumePhase(in.Phase) + out.Message = in.Message + out.Reason = in.Reason + return nil +} + +// Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus is an autogenerated conversion function. +func Convert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in *core.PersistentVolumeStatus, out *v1.PersistentVolumeStatus, s conversion.Scope) error { + return autoConvert_core_PersistentVolumeStatus_To_v1_PersistentVolumeStatus(in, out, s) +} + +func autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in *v1.PhotonPersistentDiskVolumeSource, out *core.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PhotonPersistentDiskVolumeSource_To_core_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + out.PdID = in.PdID + out.FSType = in.FSType + return nil +} + +// Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource is an autogenerated conversion function. +func Convert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in *core.PhotonPersistentDiskVolumeSource, out *v1.PhotonPersistentDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_PhotonPersistentDiskVolumeSource_To_v1_PhotonPersistentDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Pod_To_core_Pod is an autogenerated conversion function. 
+func Convert_v1_Pod_To_core_Pod(in *v1.Pod, out *core.Pod, s conversion.Scope) error { + return autoConvert_v1_Pod_To_core_Pod(in, out, s) +} + +func autoConvert_core_Pod_To_v1_Pod(in *core.Pod, out *v1.Pod, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAffinity_To_core_PodAffinity is an autogenerated conversion function. +func Convert_v1_PodAffinity_To_core_PodAffinity(in *v1.PodAffinity, out *core.PodAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAffinity_To_core_PodAffinity(in, out, s) +} + +func autoConvert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAffinity_To_v1_PodAffinity is an autogenerated conversion function. +func Convert_core_PodAffinity_To_v1_PodAffinity(in *core.PodAffinity, out *v1.PodAffinity, s conversion.Scope) error { + return autoConvert_core_PodAffinity_To_v1_PodAffinity(in, out, s) +} + +func autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm is an autogenerated conversion function. +func Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in *v1.PodAffinityTerm, out *core.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_PodAffinityTerm_To_core_PodAffinityTerm(in, out, s) +} + +func autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + out.LabelSelector = (*meta_v1.LabelSelector)(unsafe.Pointer(in.LabelSelector)) + out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces)) + out.TopologyKey = in.TopologyKey + return nil +} + +// Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm is an autogenerated conversion function. 
+func Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in *core.PodAffinityTerm, out *v1.PodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_PodAffinityTerm_To_v1_PodAffinityTerm(in, out, s) +} + +func autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]core.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]core.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity is an autogenerated conversion function. +func Convert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in *v1.PodAntiAffinity, out *core.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_v1_PodAntiAffinity_To_core_PodAntiAffinity(in, out, s) +} + +func autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + out.RequiredDuringSchedulingIgnoredDuringExecution = *(*[]v1.PodAffinityTerm)(unsafe.Pointer(&in.RequiredDuringSchedulingIgnoredDuringExecution)) + out.PreferredDuringSchedulingIgnoredDuringExecution = *(*[]v1.WeightedPodAffinityTerm)(unsafe.Pointer(&in.PreferredDuringSchedulingIgnoredDuringExecution)) + return nil +} + +// Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity is an autogenerated conversion function. +func Convert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in *core.PodAntiAffinity, out *v1.PodAntiAffinity, s conversion.Scope) error { + return autoConvert_core_PodAntiAffinity_To_v1_PodAntiAffinity(in, out, s) +} + +func autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_v1_PodAttachOptions_To_core_PodAttachOptions is an autogenerated conversion function. +func Convert_v1_PodAttachOptions_To_core_PodAttachOptions(in *v1.PodAttachOptions, out *core.PodAttachOptions, s conversion.Scope) error { + return autoConvert_v1_PodAttachOptions_To_core_PodAttachOptions(in, out, s) +} + +func autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + return nil +} + +// Convert_core_PodAttachOptions_To_v1_PodAttachOptions is an autogenerated conversion function. +func Convert_core_PodAttachOptions_To_v1_PodAttachOptions(in *core.PodAttachOptions, out *v1.PodAttachOptions, s conversion.Scope) error { + return autoConvert_core_PodAttachOptions_To_v1_PodAttachOptions(in, out, s) +} + +func autoConvert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + out.Type = core.PodConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PodCondition_To_core_PodCondition is an autogenerated conversion function. 
+func Convert_v1_PodCondition_To_core_PodCondition(in *v1.PodCondition, out *core.PodCondition, s conversion.Scope) error { + return autoConvert_v1_PodCondition_To_core_PodCondition(in, out, s) +} + +func autoConvert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + out.Type = v1.PodConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastProbeTime = in.LastProbeTime + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PodCondition_To_v1_PodCondition is an autogenerated conversion function. +func Convert_core_PodCondition_To_v1_PodCondition(in *core.PodCondition, out *v1.PodCondition, s conversion.Scope) error { + return autoConvert_core_PodCondition_To_v1_PodCondition(in, out, s) +} + +func autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]core.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_v1_PodDNSConfig_To_core_PodDNSConfig is an autogenerated conversion function. +func Convert_v1_PodDNSConfig_To_core_PodDNSConfig(in *v1.PodDNSConfig, out *core.PodDNSConfig, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfig_To_core_PodDNSConfig(in, out, s) +} + +func autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + out.Nameservers = *(*[]string)(unsafe.Pointer(&in.Nameservers)) + out.Searches = *(*[]string)(unsafe.Pointer(&in.Searches)) + out.Options = *(*[]v1.PodDNSConfigOption)(unsafe.Pointer(&in.Options)) + return nil +} + +// Convert_core_PodDNSConfig_To_v1_PodDNSConfig is an autogenerated conversion function. +func Convert_core_PodDNSConfig_To_v1_PodDNSConfig(in *core.PodDNSConfig, out *v1.PodDNSConfig, s conversion.Scope) error { + return autoConvert_core_PodDNSConfig_To_v1_PodDNSConfig(in, out, s) +} + +func autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption is an autogenerated conversion function. +func Convert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in *v1.PodDNSConfigOption, out *core.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_v1_PodDNSConfigOption_To_core_PodDNSConfigOption(in, out, s) +} + +func autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + out.Name = in.Name + out.Value = (*string)(unsafe.Pointer(in.Value)) + return nil +} + +// Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption is an autogenerated conversion function. 
+func Convert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in *core.PodDNSConfigOption, out *v1.PodDNSConfigOption, s conversion.Scope) error { + return autoConvert_core_PodDNSConfigOption_To_v1_PodDNSConfigOption(in, out, s) +} + +func autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_v1_PodExecOptions_To_core_PodExecOptions is an autogenerated conversion function. +func Convert_v1_PodExecOptions_To_core_PodExecOptions(in *v1.PodExecOptions, out *core.PodExecOptions, s conversion.Scope) error { + return autoConvert_v1_PodExecOptions_To_core_PodExecOptions(in, out, s) +} + +func autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + out.Stdin = in.Stdin + out.Stdout = in.Stdout + out.Stderr = in.Stderr + out.TTY = in.TTY + out.Container = in.Container + out.Command = *(*[]string)(unsafe.Pointer(&in.Command)) + return nil +} + +// Convert_core_PodExecOptions_To_v1_PodExecOptions is an autogenerated conversion function. +func Convert_core_PodExecOptions_To_v1_PodExecOptions(in *core.PodExecOptions, out *v1.PodExecOptions, s conversion.Scope) error { + return autoConvert_core_PodExecOptions_To_v1_PodExecOptions(in, out, s) +} + +func autoConvert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Pod, len(*in)) + for i := range *in { + if err := Convert_v1_Pod_To_core_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodList_To_core_PodList is an autogenerated conversion function. +func Convert_v1_PodList_To_core_PodList(in *v1.PodList, out *core.PodList, s conversion.Scope) error { + return autoConvert_v1_PodList_To_core_PodList(in, out, s) +} + +func autoConvert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Pod, len(*in)) + for i := range *in { + if err := Convert_core_Pod_To_v1_Pod(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodList_To_v1_PodList is an autogenerated conversion function. +func Convert_core_PodList_To_v1_PodList(in *core.PodList, out *v1.PodList, s conversion.Scope) error { + return autoConvert_core_PodList_To_v1_PodList(in, out, s) +} + +func autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_v1_PodLogOptions_To_core_PodLogOptions is an autogenerated conversion function. 
+func Convert_v1_PodLogOptions_To_core_PodLogOptions(in *v1.PodLogOptions, out *core.PodLogOptions, s conversion.Scope) error { + return autoConvert_v1_PodLogOptions_To_core_PodLogOptions(in, out, s) +} + +func autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + out.Container = in.Container + out.Follow = in.Follow + out.Previous = in.Previous + out.SinceSeconds = (*int64)(unsafe.Pointer(in.SinceSeconds)) + out.SinceTime = (*meta_v1.Time)(unsafe.Pointer(in.SinceTime)) + out.Timestamps = in.Timestamps + out.TailLines = (*int64)(unsafe.Pointer(in.TailLines)) + out.LimitBytes = (*int64)(unsafe.Pointer(in.LimitBytes)) + return nil +} + +// Convert_core_PodLogOptions_To_v1_PodLogOptions is an autogenerated conversion function. +func Convert_core_PodLogOptions_To_v1_PodLogOptions(in *core.PodLogOptions, out *v1.PodLogOptions, s conversion.Scope) error { + return autoConvert_core_PodLogOptions_To_v1_PodLogOptions(in, out, s) +} + +func autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions is an autogenerated conversion function. +func Convert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in *v1.PodPortForwardOptions, out *core.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_v1_PodPortForwardOptions_To_core_PodPortForwardOptions(in, out, s) +} + +func autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + out.Ports = *(*[]int32)(unsafe.Pointer(&in.Ports)) + return nil +} + +// Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions is an autogenerated conversion function. +func Convert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in *core.PodPortForwardOptions, out *v1.PodPortForwardOptions, s conversion.Scope) error { + return autoConvert_core_PodPortForwardOptions_To_v1_PodPortForwardOptions(in, out, s) +} + +func autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_PodProxyOptions_To_core_PodProxyOptions is an autogenerated conversion function. +func Convert_v1_PodProxyOptions_To_core_PodProxyOptions(in *v1.PodProxyOptions, out *core.PodProxyOptions, s conversion.Scope) error { + return autoConvert_v1_PodProxyOptions_To_core_PodProxyOptions(in, out, s) +} + +func autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_PodProxyOptions_To_v1_PodProxyOptions is an autogenerated conversion function. 
+func Convert_core_PodProxyOptions_To_v1_PodProxyOptions(in *core.PodProxyOptions, out *v1.PodProxyOptions, s conversion.Scope) error { + return autoConvert_core_PodProxyOptions_To_v1_PodProxyOptions(in, out, s) +} + +func autoConvert_v1_PodSecurityContext_To_core_PodSecurityContext(in *v1.PodSecurityContext, out *core.PodSecurityContext, s conversion.Scope) error { + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_core_PodSecurityContext_To_v1_PodSecurityContext(in *core.PodSecurityContext, out *v1.PodSecurityContext, s conversion.Scope) error { + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.SupplementalGroups = *(*[]int64)(unsafe.Pointer(&in.SupplementalGroups)) + out.FSGroup = (*int64)(unsafe.Pointer(in.FSGroup)) + return nil +} + +func autoConvert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_v1_PodSignature_To_core_PodSignature is an autogenerated conversion function. +func Convert_v1_PodSignature_To_core_PodSignature(in *v1.PodSignature, out *core.PodSignature, s conversion.Scope) error { + return autoConvert_v1_PodSignature_To_core_PodSignature(in, out, s) +} + +func autoConvert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + out.PodController = (*meta_v1.OwnerReference)(unsafe.Pointer(in.PodController)) + return nil +} + +// Convert_core_PodSignature_To_v1_PodSignature is an autogenerated conversion function. 
+func Convert_core_PodSignature_To_v1_PodSignature(in *core.PodSignature, out *v1.PodSignature, s conversion.Scope) error { + return autoConvert_core_PodSignature_To_v1_PodSignature(in, out, s) +} + +func autoConvert_v1_PodSpec_To_core_PodSpec(in *v1.PodSpec, out *core.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]core.Volume, len(*in)) + for i := range *in { + if err := Convert_v1_Volume_To_core_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]core.Container, len(*in)) + for i := range *in { + if err := Convert_v1_Container_To_core_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = core.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = core.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + // INFO: in.DeprecatedServiceAccount opted out of conversion generation + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + // INFO: in.HostNetwork opted out of conversion generation + // INFO: in.HostPID opted out of conversion generation + // INFO: in.HostIPC opted out of conversion generation + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(core.PodSecurityContext) + if err := Convert_v1_PodSecurityContext_To_core_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*core.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]core.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]core.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*core.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + return nil +} + +func autoConvert_core_PodSpec_To_v1_PodSpec(in *core.PodSpec, out *v1.PodSpec, s conversion.Scope) error { + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + if err := Convert_core_Volume_To_v1_Volume(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Volumes = nil + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.InitContainers = nil + } + if in.Containers != nil { + in, out := 
&in.Containers, &out.Containers + *out = make([]v1.Container, len(*in)) + for i := range *in { + if err := Convert_core_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Containers = nil + } + out.RestartPolicy = v1.RestartPolicy(in.RestartPolicy) + out.TerminationGracePeriodSeconds = (*int64)(unsafe.Pointer(in.TerminationGracePeriodSeconds)) + out.ActiveDeadlineSeconds = (*int64)(unsafe.Pointer(in.ActiveDeadlineSeconds)) + out.DNSPolicy = v1.DNSPolicy(in.DNSPolicy) + out.NodeSelector = *(*map[string]string)(unsafe.Pointer(&in.NodeSelector)) + out.ServiceAccountName = in.ServiceAccountName + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + out.NodeName = in.NodeName + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(v1.PodSecurityContext) + if err := Convert_core_PodSecurityContext_To_v1_PodSecurityContext(*in, *out, s); err != nil { + return err + } + } else { + out.SecurityContext = nil + } + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.Hostname = in.Hostname + out.Subdomain = in.Subdomain + out.Affinity = (*v1.Affinity)(unsafe.Pointer(in.Affinity)) + out.SchedulerName = in.SchedulerName + out.Tolerations = *(*[]v1.Toleration)(unsafe.Pointer(&in.Tolerations)) + out.HostAliases = *(*[]v1.HostAlias)(unsafe.Pointer(&in.HostAliases)) + out.PriorityClassName = in.PriorityClassName + out.Priority = (*int32)(unsafe.Pointer(in.Priority)) + out.DNSConfig = (*v1.PodDNSConfig)(unsafe.Pointer(in.DNSConfig)) + return nil +} + +func autoConvert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + out.Phase = core.PodPhase(in.Phase) + out.Conditions = *(*[]core.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.InitContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]core.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + out.QOSClass = core.PodQOSClass(in.QOSClass) + return nil +} + +// Convert_v1_PodStatus_To_core_PodStatus is an autogenerated conversion function. +func Convert_v1_PodStatus_To_core_PodStatus(in *v1.PodStatus, out *core.PodStatus, s conversion.Scope) error { + return autoConvert_v1_PodStatus_To_core_PodStatus(in, out, s) +} + +func autoConvert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + out.Phase = v1.PodPhase(in.Phase) + out.Conditions = *(*[]v1.PodCondition)(unsafe.Pointer(&in.Conditions)) + out.Message = in.Message + out.Reason = in.Reason + out.HostIP = in.HostIP + out.PodIP = in.PodIP + out.StartTime = (*meta_v1.Time)(unsafe.Pointer(in.StartTime)) + out.QOSClass = v1.PodQOSClass(in.QOSClass) + out.InitContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.InitContainerStatuses)) + out.ContainerStatuses = *(*[]v1.ContainerStatus)(unsafe.Pointer(&in.ContainerStatuses)) + return nil +} + +// Convert_core_PodStatus_To_v1_PodStatus is an autogenerated conversion function. 
+func Convert_core_PodStatus_To_v1_PodStatus(in *core.PodStatus, out *v1.PodStatus, s conversion.Scope) error { + return autoConvert_core_PodStatus_To_v1_PodStatus(in, out, s) +} + +func autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodStatus_To_core_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodStatusResult_To_core_PodStatusResult is an autogenerated conversion function. +func Convert_v1_PodStatusResult_To_core_PodStatusResult(in *v1.PodStatusResult, out *core.PodStatusResult, s conversion.Scope) error { + return autoConvert_v1_PodStatusResult_To_core_PodStatusResult(in, out, s) +} + +func autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodStatus_To_v1_PodStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_PodStatusResult_To_v1_PodStatusResult is an autogenerated conversion function. +func Convert_core_PodStatusResult_To_v1_PodStatusResult(in *core.PodStatusResult, out *v1.PodStatusResult, s conversion.Scope) error { + return autoConvert_core_PodStatusResult_To_v1_PodStatusResult(in, out, s) +} + +func autoConvert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PodTemplate_To_core_PodTemplate is an autogenerated conversion function. +func Convert_v1_PodTemplate_To_core_PodTemplate(in *v1.PodTemplate, out *core.PodTemplate, s conversion.Scope) error { + return autoConvert_v1_PodTemplate_To_core_PodTemplate(in, out, s) +} + +func autoConvert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(&in.Template, &out.Template, s); err != nil { + return err + } + return nil +} + +// Convert_core_PodTemplate_To_v1_PodTemplate is an autogenerated conversion function. +func Convert_core_PodTemplate_To_v1_PodTemplate(in *core.PodTemplate, out *v1.PodTemplate, s conversion.Scope) error { + return autoConvert_core_PodTemplate_To_v1_PodTemplate(in, out, s) +} + +func autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_v1_PodTemplate_To_core_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_PodTemplateList_To_core_PodTemplateList is an autogenerated conversion function. 
+func Convert_v1_PodTemplateList_To_core_PodTemplateList(in *v1.PodTemplateList, out *core.PodTemplateList, s conversion.Scope) error { + return autoConvert_v1_PodTemplateList_To_core_PodTemplateList(in, out, s) +} + +func autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.PodTemplate, len(*in)) + for i := range *in { + if err := Convert_core_PodTemplate_To_v1_PodTemplate(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_PodTemplateList_To_v1_PodTemplateList is an autogenerated conversion function. +func Convert_core_PodTemplateList_To_v1_PodTemplateList(in *core.PodTemplateList, out *v1.PodTemplateList, s conversion.Scope) error { + return autoConvert_core_PodTemplateList_To_v1_PodTemplateList(in, out, s) +} + +func autoConvert_v1_PodTemplateSpec_To_core_PodTemplateSpec(in *v1.PodTemplateSpec, out *core.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_PodSpec_To_core_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_core_PodTemplateSpec_To_v1_PodTemplateSpec(in *core.PodTemplateSpec, out *v1.PodTemplateSpec, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_PodSpec_To_v1_PodSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +func autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource is an autogenerated conversion function. +func Convert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in *v1.PortworxVolumeSource, out *core.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_v1_PortworxVolumeSource_To_core_PortworxVolumeSource(in, out, s) +} + +func autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + out.VolumeID = in.VolumeID + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource is an autogenerated conversion function. +func Convert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in *core.PortworxVolumeSource, out *v1.PortworxVolumeSource, s conversion.Scope) error { + return autoConvert_core_PortworxVolumeSource_To_v1_PortworxVolumeSource(in, out, s) +} + +func autoConvert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_v1_Preconditions_To_core_Preconditions is an autogenerated conversion function. 
+func Convert_v1_Preconditions_To_core_Preconditions(in *v1.Preconditions, out *core.Preconditions, s conversion.Scope) error { + return autoConvert_v1_Preconditions_To_core_Preconditions(in, out, s) +} + +func autoConvert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + out.UID = (*types.UID)(unsafe.Pointer(in.UID)) + return nil +} + +// Convert_core_Preconditions_To_v1_Preconditions is an autogenerated conversion function. +func Convert_core_Preconditions_To_v1_Preconditions(in *core.Preconditions, out *v1.Preconditions, s conversion.Scope) error { + return autoConvert_core_Preconditions_To_v1_Preconditions(in, out, s) +} + +func autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_v1_PodSignature_To_core_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in *v1.PreferAvoidPodsEntry, out *core.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_v1_PreferAvoidPodsEntry_To_core_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + if err := Convert_core_PodSignature_To_v1_PodSignature(&in.PodSignature, &out.PodSignature, s); err != nil { + return err + } + out.EvictionTime = in.EvictionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry is an autogenerated conversion function. +func Convert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in *core.PreferAvoidPodsEntry, out *v1.PreferAvoidPodsEntry, s conversion.Scope) error { + return autoConvert_core_PreferAvoidPodsEntry_To_v1_PreferAvoidPodsEntry(in, out, s) +} + +func autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_NodeSelectorTerm_To_core_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm is an autogenerated conversion function. +func Convert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in *v1.PreferredSchedulingTerm, out *core.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_v1_PreferredSchedulingTerm_To_core_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_NodeSelectorTerm_To_v1_NodeSelectorTerm(&in.Preference, &out.Preference, s); err != nil { + return err + } + return nil +} + +// Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm is an autogenerated conversion function. 
+func Convert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in *core.PreferredSchedulingTerm, out *v1.PreferredSchedulingTerm, s conversion.Scope) error { + return autoConvert_core_PreferredSchedulingTerm_To_v1_PreferredSchedulingTerm(in, out, s) +} + +func autoConvert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + if err := Convert_v1_Handler_To_core_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_v1_Probe_To_core_Probe is an autogenerated conversion function. +func Convert_v1_Probe_To_core_Probe(in *v1.Probe, out *core.Probe, s conversion.Scope) error { + return autoConvert_v1_Probe_To_core_Probe(in, out, s) +} + +func autoConvert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + if err := Convert_core_Handler_To_v1_Handler(&in.Handler, &out.Handler, s); err != nil { + return err + } + out.InitialDelaySeconds = in.InitialDelaySeconds + out.TimeoutSeconds = in.TimeoutSeconds + out.PeriodSeconds = in.PeriodSeconds + out.SuccessThreshold = in.SuccessThreshold + out.FailureThreshold = in.FailureThreshold + return nil +} + +// Convert_core_Probe_To_v1_Probe is an autogenerated conversion function. +func Convert_core_Probe_To_v1_Probe(in *core.Probe, out *v1.Probe, s conversion.Scope) error { + return autoConvert_core_Probe_To_v1_Probe(in, out, s) +} + +func autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]core.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in *v1.ProjectedVolumeSource, out *core.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ProjectedVolumeSource_To_core_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + out.Sources = *(*[]v1.VolumeProjection)(unsafe.Pointer(&in.Sources)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + return nil +} + +// Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource is an autogenerated conversion function. +func Convert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in *core.ProjectedVolumeSource, out *v1.ProjectedVolumeSource, s conversion.Scope) error { + return autoConvert_core_ProjectedVolumeSource_To_v1_ProjectedVolumeSource(in, out, s) +} + +func autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource is an autogenerated conversion function. 
+func Convert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in *v1.QuobyteVolumeSource, out *core.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_v1_QuobyteVolumeSource_To_core_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + out.Registry = in.Registry + out.Volume = in.Volume + out.ReadOnly = in.ReadOnly + out.User = in.User + out.Group = in.Group + return nil +} + +// Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource is an autogenerated conversion function. +func Convert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in *core.QuobyteVolumeSource, out *v1.QuobyteVolumeSource, s conversion.Scope) error { + return autoConvert_core_QuobyteVolumeSource_To_v1_QuobyteVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in *v1.RBDPersistentVolumeSource, out *core.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDPersistentVolumeSource_To_core_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in *core.RBDPersistentVolumeSource, out *v1.RBDPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDPersistentVolumeSource_To_v1_RBDPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource is an autogenerated conversion function. 
+func Convert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in *v1.RBDVolumeSource, out *core.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_v1_RBDVolumeSource_To_core_RBDVolumeSource(in, out, s) +} + +func autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + out.CephMonitors = *(*[]string)(unsafe.Pointer(&in.CephMonitors)) + out.RBDImage = in.RBDImage + out.FSType = in.FSType + out.RBDPool = in.RBDPool + out.RadosUser = in.RadosUser + out.Keyring = in.Keyring + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource is an autogenerated conversion function. +func Convert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in *core.RBDVolumeSource, out *v1.RBDVolumeSource, s conversion.Scope) error { + return autoConvert_core_RBDVolumeSource_To_v1_RBDVolumeSource(in, out, s) +} + +func autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_v1_RangeAllocation_To_core_RangeAllocation is an autogenerated conversion function. +func Convert_v1_RangeAllocation_To_core_RangeAllocation(in *v1.RangeAllocation, out *core.RangeAllocation, s conversion.Scope) error { + return autoConvert_v1_RangeAllocation_To_core_RangeAllocation(in, out, s) +} + +func autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Range = in.Range + out.Data = *(*[]byte)(unsafe.Pointer(&in.Data)) + return nil +} + +// Convert_core_RangeAllocation_To_v1_RangeAllocation is an autogenerated conversion function. +func Convert_core_RangeAllocation_To_v1_RangeAllocation(in *core.RangeAllocation, out *v1.RangeAllocation, s conversion.Scope) error { + return autoConvert_core_RangeAllocation_To_v1_RangeAllocation(in, out, s) +} + +func autoConvert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ReplicationController_To_core_ReplicationController is an autogenerated conversion function. 
+func Convert_v1_ReplicationController_To_core_ReplicationController(in *v1.ReplicationController, out *core.ReplicationController, s conversion.Scope) error { + return autoConvert_v1_ReplicationController_To_core_ReplicationController(in, out, s) +} + +func autoConvert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ReplicationController_To_v1_ReplicationController is an autogenerated conversion function. +func Convert_core_ReplicationController_To_v1_ReplicationController(in *core.ReplicationController, out *v1.ReplicationController, s conversion.Scope) error { + return autoConvert_core_ReplicationController_To_v1_ReplicationController(in, out, s) +} + +func autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = core.ReplicationControllerConditionType(in.Type) + out.Status = core.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in *v1.ReplicationControllerCondition, out *core.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerCondition_To_core_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + out.Type = v1.ReplicationControllerConditionType(in.Type) + out.Status = v1.ConditionStatus(in.Status) + out.LastTransitionTime = in.LastTransitionTime + out.Reason = in.Reason + out.Message = in.Message + return nil +} + +// Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition is an autogenerated conversion function. +func Convert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in *core.ReplicationControllerCondition, out *v1.ReplicationControllerCondition, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerCondition_To_v1_ReplicationControllerCondition(in, out, s) +} + +func autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_v1_ReplicationController_To_core_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in *v1.ReplicationControllerList, out *core.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerList_To_core_ReplicationControllerList(in, out, s) +} + +func autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.ReplicationController, len(*in)) + for i := range *in { + if err := Convert_core_ReplicationController_To_v1_ReplicationController(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList is an autogenerated conversion function. +func Convert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in *core.ReplicationControllerList, out *v1.ReplicationControllerList, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerList_To_v1_ReplicationControllerList(in, out, s) +} + +func autoConvert_v1_ReplicationControllerSpec_To_core_ReplicationControllerSpec(in *v1.ReplicationControllerSpec, out *core.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_Pointer_int32_To_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(core.PodTemplateSpec) + if err := Convert_v1_PodTemplateSpec_To_core_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_core_ReplicationControllerSpec_To_v1_ReplicationControllerSpec(in *core.ReplicationControllerSpec, out *v1.ReplicationControllerSpec, s conversion.Scope) error { + if err := meta_v1.Convert_int32_To_Pointer_int32(&in.Replicas, &out.Replicas, s); err != nil { + return err + } + out.MinReadySeconds = in.MinReadySeconds + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(v1.PodTemplateSpec) + if err := Convert_core_PodTemplateSpec_To_v1_PodTemplateSpec(*in, *out, s); err != nil { + return err + } + } else { + out.Template = nil + } + return nil +} + +func autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]core.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus is an autogenerated conversion function. 
+func Convert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in *v1.ReplicationControllerStatus, out *core.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_v1_ReplicationControllerStatus_To_core_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + out.Replicas = in.Replicas + out.FullyLabeledReplicas = in.FullyLabeledReplicas + out.ReadyReplicas = in.ReadyReplicas + out.AvailableReplicas = in.AvailableReplicas + out.ObservedGeneration = in.ObservedGeneration + out.Conditions = *(*[]v1.ReplicationControllerCondition)(unsafe.Pointer(&in.Conditions)) + return nil +} + +// Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus is an autogenerated conversion function. +func Convert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in *core.ReplicationControllerStatus, out *v1.ReplicationControllerStatus, s conversion.Scope) error { + return autoConvert_core_ReplicationControllerStatus_To_v1_ReplicationControllerStatus(in, out, s) +} + +func autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector is an autogenerated conversion function. +func Convert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in *v1.ResourceFieldSelector, out *core.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_v1_ResourceFieldSelector_To_core_ResourceFieldSelector(in, out, s) +} + +func autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + out.ContainerName = in.ContainerName + out.Resource = in.Resource + out.Divisor = in.Divisor + return nil +} + +// Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector is an autogenerated conversion function. +func Convert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in *core.ResourceFieldSelector, out *v1.ResourceFieldSelector, s conversion.Scope) error { + return autoConvert_core_ResourceFieldSelector_To_v1_ResourceFieldSelector(in, out, s) +} + +func autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ResourceQuota_To_core_ResourceQuota is an autogenerated conversion function. 
+func Convert_v1_ResourceQuota_To_core_ResourceQuota(in *v1.ResourceQuota, out *core.ResourceQuota, s conversion.Scope) error { + return autoConvert_v1_ResourceQuota_To_core_ResourceQuota(in, out, s) +} + +func autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_ResourceQuota_To_v1_ResourceQuota is an autogenerated conversion function. +func Convert_core_ResourceQuota_To_v1_ResourceQuota(in *core.ResourceQuota, out *v1.ResourceQuota, s conversion.Scope) error { + return autoConvert_core_ResourceQuota_To_v1_ResourceQuota(in, out, s) +} + +func autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList is an autogenerated conversion function. +func Convert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in *v1.ResourceQuotaList, out *core.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaList_To_core_ResourceQuotaList(in, out, s) +} + +func autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ResourceQuota)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList is an autogenerated conversion function. +func Convert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in *core.ResourceQuotaList, out *v1.ResourceQuotaList, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaList_To_v1_ResourceQuotaList(in, out, s) +} + +func autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]core.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec is an autogenerated conversion function. +func Convert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in *v1.ResourceQuotaSpec, out *core.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaSpec_To_core_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Scopes = *(*[]v1.ResourceQuotaScope)(unsafe.Pointer(&in.Scopes)) + return nil +} + +// Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec is an autogenerated conversion function. 
+func Convert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in *core.ResourceQuotaSpec, out *v1.ResourceQuotaSpec, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaSpec_To_v1_ResourceQuotaSpec(in, out, s) +} + +func autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*core.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*core.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in *v1.ResourceQuotaStatus, out *core.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_v1_ResourceQuotaStatus_To_core_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + out.Hard = *(*v1.ResourceList)(unsafe.Pointer(&in.Hard)) + out.Used = *(*v1.ResourceList)(unsafe.Pointer(&in.Used)) + return nil +} + +// Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus is an autogenerated conversion function. +func Convert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in *core.ResourceQuotaStatus, out *v1.ResourceQuotaStatus, s conversion.Scope) error { + return autoConvert_core_ResourceQuotaStatus_To_v1_ResourceQuotaStatus(in, out, s) +} + +func autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*core.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*core.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_v1_ResourceRequirements_To_core_ResourceRequirements is an autogenerated conversion function. +func Convert_v1_ResourceRequirements_To_core_ResourceRequirements(in *v1.ResourceRequirements, out *core.ResourceRequirements, s conversion.Scope) error { + return autoConvert_v1_ResourceRequirements_To_core_ResourceRequirements(in, out, s) +} + +func autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + out.Limits = *(*v1.ResourceList)(unsafe.Pointer(&in.Limits)) + out.Requests = *(*v1.ResourceList)(unsafe.Pointer(&in.Requests)) + return nil +} + +// Convert_core_ResourceRequirements_To_v1_ResourceRequirements is an autogenerated conversion function. +func Convert_core_ResourceRequirements_To_v1_ResourceRequirements(in *core.ResourceRequirements, out *v1.ResourceRequirements, s conversion.Scope) error { + return autoConvert_core_ResourceRequirements_To_v1_ResourceRequirements(in, out, s) +} + +func autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_v1_SELinuxOptions_To_core_SELinuxOptions is an autogenerated conversion function. 
+func Convert_v1_SELinuxOptions_To_core_SELinuxOptions(in *v1.SELinuxOptions, out *core.SELinuxOptions, s conversion.Scope) error { + return autoConvert_v1_SELinuxOptions_To_core_SELinuxOptions(in, out, s) +} + +func autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + out.User = in.User + out.Role = in.Role + out.Type = in.Type + out.Level = in.Level + return nil +} + +// Convert_core_SELinuxOptions_To_v1_SELinuxOptions is an autogenerated conversion function. +func Convert_core_SELinuxOptions_To_v1_SELinuxOptions(in *core.SELinuxOptions, out *v1.SELinuxOptions, s conversion.Scope) error { + return autoConvert_core_SELinuxOptions_To_v1_SELinuxOptions(in, out, s) +} + +func autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in *v1.ScaleIOPersistentVolumeSource, out *core.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOPersistentVolumeSource_To_core_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.SecretReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in *core.ScaleIOPersistentVolumeSource, out *v1.ScaleIOPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOPersistentVolumeSource_To_v1_ScaleIOPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource is an autogenerated conversion function. 
+func Convert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in *v1.ScaleIOVolumeSource, out *core.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_v1_ScaleIOVolumeSource_To_core_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + out.Gateway = in.Gateway + out.System = in.System + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + out.SSLEnabled = in.SSLEnabled + out.ProtectionDomain = in.ProtectionDomain + out.StoragePool = in.StoragePool + out.StorageMode = in.StorageMode + out.VolumeName = in.VolumeName + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + return nil +} + +// Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource is an autogenerated conversion function. +func Convert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in *core.ScaleIOVolumeSource, out *v1.ScaleIOVolumeSource, s conversion.Scope) error { + return autoConvert_core_ScaleIOVolumeSource_To_v1_ScaleIOVolumeSource(in, out, s) +} + +func autoConvert_v1_Secret_To_core_Secret(in *v1.Secret, out *core.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + // INFO: in.StringData opted out of conversion generation + out.Type = core.SecretType(in.Type) + return nil +} + +func autoConvert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Data = *(*map[string][]byte)(unsafe.Pointer(&in.Data)) + out.Type = v1.SecretType(in.Type) + return nil +} + +// Convert_core_Secret_To_v1_Secret is an autogenerated conversion function. +func Convert_core_Secret_To_v1_Secret(in *core.Secret, out *v1.Secret, s conversion.Scope) error { + return autoConvert_core_Secret_To_v1_Secret(in, out, s) +} + +func autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretEnvSource_To_core_SecretEnvSource is an autogenerated conversion function. +func Convert_v1_SecretEnvSource_To_core_SecretEnvSource(in *v1.SecretEnvSource, out *core.SecretEnvSource, s conversion.Scope) error { + return autoConvert_v1_SecretEnvSource_To_core_SecretEnvSource(in, out, s) +} + +func autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretEnvSource_To_v1_SecretEnvSource is an autogenerated conversion function. 
+func Convert_core_SecretEnvSource_To_v1_SecretEnvSource(in *core.SecretEnvSource, out *v1.SecretEnvSource, s conversion.Scope) error { + return autoConvert_core_SecretEnvSource_To_v1_SecretEnvSource(in, out, s) +} + +func autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretKeySelector_To_core_SecretKeySelector is an autogenerated conversion function. +func Convert_v1_SecretKeySelector_To_core_SecretKeySelector(in *v1.SecretKeySelector, out *core.SecretKeySelector, s conversion.Scope) error { + return autoConvert_v1_SecretKeySelector_To_core_SecretKeySelector(in, out, s) +} + +func autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Key = in.Key + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretKeySelector_To_v1_SecretKeySelector is an autogenerated conversion function. +func Convert_core_SecretKeySelector_To_v1_SecretKeySelector(in *core.SecretKeySelector, out *v1.SecretKeySelector, s conversion.Scope) error { + return autoConvert_core_SecretKeySelector_To_v1_SecretKeySelector(in, out, s) +} + +func autoConvert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Secret, len(*in)) + for i := range *in { + if err := Convert_v1_Secret_To_core_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_SecretList_To_core_SecretList is an autogenerated conversion function. +func Convert_v1_SecretList_To_core_SecretList(in *v1.SecretList, out *core.SecretList, s conversion.Scope) error { + return autoConvert_v1_SecretList_To_core_SecretList(in, out, s) +} + +func autoConvert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Secret, len(*in)) + for i := range *in { + if err := Convert_core_Secret_To_v1_Secret(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_SecretList_To_v1_SecretList is an autogenerated conversion function. 
+func Convert_core_SecretList_To_v1_SecretList(in *core.SecretList, out *v1.SecretList, s conversion.Scope) error { + return autoConvert_core_SecretList_To_v1_SecretList(in, out, s) +} + +func autoConvert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + if err := Convert_v1_LocalObjectReference_To_core_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretProjection_To_core_SecretProjection is an autogenerated conversion function. +func Convert_v1_SecretProjection_To_core_SecretProjection(in *v1.SecretProjection, out *core.SecretProjection, s conversion.Scope) error { + return autoConvert_v1_SecretProjection_To_core_SecretProjection(in, out, s) +} + +func autoConvert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + if err := Convert_core_LocalObjectReference_To_v1_LocalObjectReference(&in.LocalObjectReference, &out.LocalObjectReference, s); err != nil { + return err + } + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretProjection_To_v1_SecretProjection is an autogenerated conversion function. +func Convert_core_SecretProjection_To_v1_SecretProjection(in *core.SecretProjection, out *v1.SecretProjection, s conversion.Scope) error { + return autoConvert_core_SecretProjection_To_v1_SecretProjection(in, out, s) +} + +func autoConvert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_v1_SecretReference_To_core_SecretReference is an autogenerated conversion function. +func Convert_v1_SecretReference_To_core_SecretReference(in *v1.SecretReference, out *core.SecretReference, s conversion.Scope) error { + return autoConvert_v1_SecretReference_To_core_SecretReference(in, out, s) +} + +func autoConvert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + out.Name = in.Name + out.Namespace = in.Namespace + return nil +} + +// Convert_core_SecretReference_To_v1_SecretReference is an autogenerated conversion function. +func Convert_core_SecretReference_To_v1_SecretReference(in *core.SecretReference, out *v1.SecretReference, s conversion.Scope) error { + return autoConvert_core_SecretReference_To_v1_SecretReference(in, out, s) +} + +func autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]core.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource is an autogenerated conversion function. 
+func Convert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in *v1.SecretVolumeSource, out *core.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_v1_SecretVolumeSource_To_core_SecretVolumeSource(in, out, s) +} + +func autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + out.SecretName = in.SecretName + out.Items = *(*[]v1.KeyToPath)(unsafe.Pointer(&in.Items)) + out.DefaultMode = (*int32)(unsafe.Pointer(in.DefaultMode)) + out.Optional = (*bool)(unsafe.Pointer(in.Optional)) + return nil +} + +// Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource is an autogenerated conversion function. +func Convert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in *core.SecretVolumeSource, out *v1.SecretVolumeSource, s conversion.Scope) error { + return autoConvert_core_SecretVolumeSource_To_v1_SecretVolumeSource(in, out, s) +} + +func autoConvert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*core.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*core.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + return nil +} + +// Convert_v1_SecurityContext_To_core_SecurityContext is an autogenerated conversion function. +func Convert_v1_SecurityContext_To_core_SecurityContext(in *v1.SecurityContext, out *core.SecurityContext, s conversion.Scope) error { + return autoConvert_v1_SecurityContext_To_core_SecurityContext(in, out, s) +} + +func autoConvert_core_SecurityContext_To_v1_SecurityContext(in *core.SecurityContext, out *v1.SecurityContext, s conversion.Scope) error { + out.Capabilities = (*v1.Capabilities)(unsafe.Pointer(in.Capabilities)) + out.Privileged = (*bool)(unsafe.Pointer(in.Privileged)) + out.SELinuxOptions = (*v1.SELinuxOptions)(unsafe.Pointer(in.SELinuxOptions)) + out.RunAsUser = (*int64)(unsafe.Pointer(in.RunAsUser)) + out.RunAsNonRoot = (*bool)(unsafe.Pointer(in.RunAsNonRoot)) + out.ReadOnlyRootFilesystem = (*bool)(unsafe.Pointer(in.ReadOnlyRootFilesystem)) + out.AllowPrivilegeEscalation = (*bool)(unsafe.Pointer(in.AllowPrivilegeEscalation)) + return nil +} + +func autoConvert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + if err := Convert_v1_ObjectReference_To_core_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_v1_SerializedReference_To_core_SerializedReference is an autogenerated conversion function. 
+func Convert_v1_SerializedReference_To_core_SerializedReference(in *v1.SerializedReference, out *core.SerializedReference, s conversion.Scope) error { + return autoConvert_v1_SerializedReference_To_core_SerializedReference(in, out, s) +} + +func autoConvert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + if err := Convert_core_ObjectReference_To_v1_ObjectReference(&in.Reference, &out.Reference, s); err != nil { + return err + } + return nil +} + +// Convert_core_SerializedReference_To_v1_SerializedReference is an autogenerated conversion function. +func Convert_core_SerializedReference_To_v1_SerializedReference(in *core.SerializedReference, out *v1.SerializedReference, s conversion.Scope) error { + return autoConvert_core_SerializedReference_To_v1_SerializedReference(in, out, s) +} + +func autoConvert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_ServiceSpec_To_core_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_v1_ServiceStatus_To_core_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Service_To_core_Service is an autogenerated conversion function. +func Convert_v1_Service_To_core_Service(in *v1.Service, out *core.Service, s conversion.Scope) error { + return autoConvert_v1_Service_To_core_Service(in, out, s) +} + +func autoConvert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_core_ServiceSpec_To_v1_ServiceSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + if err := Convert_core_ServiceStatus_To_v1_ServiceStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_core_Service_To_v1_Service is an autogenerated conversion function. +func Convert_core_Service_To_v1_Service(in *core.Service, out *v1.Service, s conversion.Scope) error { + return autoConvert_core_Service_To_v1_Service(in, out, s) +} + +func autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]core.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]core.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_v1_ServiceAccount_To_core_ServiceAccount is an autogenerated conversion function. +func Convert_v1_ServiceAccount_To_core_ServiceAccount(in *v1.ServiceAccount, out *core.ServiceAccount, s conversion.Scope) error { + return autoConvert_v1_ServiceAccount_To_core_ServiceAccount(in, out, s) +} + +func autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Secrets = *(*[]v1.ObjectReference)(unsafe.Pointer(&in.Secrets)) + out.ImagePullSecrets = *(*[]v1.LocalObjectReference)(unsafe.Pointer(&in.ImagePullSecrets)) + out.AutomountServiceAccountToken = (*bool)(unsafe.Pointer(in.AutomountServiceAccountToken)) + return nil +} + +// Convert_core_ServiceAccount_To_v1_ServiceAccount is an autogenerated conversion function. 
+func Convert_core_ServiceAccount_To_v1_ServiceAccount(in *core.ServiceAccount, out *v1.ServiceAccount, s conversion.Scope) error { + return autoConvert_core_ServiceAccount_To_v1_ServiceAccount(in, out, s) +} + +func autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]core.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_ServiceAccountList_To_core_ServiceAccountList is an autogenerated conversion function. +func Convert_v1_ServiceAccountList_To_core_ServiceAccountList(in *v1.ServiceAccountList, out *core.ServiceAccountList, s conversion.Scope) error { + return autoConvert_v1_ServiceAccountList_To_core_ServiceAccountList(in, out, s) +} + +func autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1.ServiceAccount)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_core_ServiceAccountList_To_v1_ServiceAccountList is an autogenerated conversion function. +func Convert_core_ServiceAccountList_To_v1_ServiceAccountList(in *core.ServiceAccountList, out *v1.ServiceAccountList, s conversion.Scope) error { + return autoConvert_core_ServiceAccountList_To_v1_ServiceAccountList(in, out, s) +} + +func autoConvert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]core.Service, len(*in)) + for i := range *in { + if err := Convert_v1_Service_To_core_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_v1_ServiceList_To_core_ServiceList is an autogenerated conversion function. +func Convert_v1_ServiceList_To_core_ServiceList(in *v1.ServiceList, out *core.ServiceList, s conversion.Scope) error { + return autoConvert_v1_ServiceList_To_core_ServiceList(in, out, s) +} + +func autoConvert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]v1.Service, len(*in)) + for i := range *in { + if err := Convert_core_Service_To_v1_Service(&(*in)[i], &(*out)[i], s); err != nil { + return err + } + } + } else { + out.Items = nil + } + return nil +} + +// Convert_core_ServiceList_To_v1_ServiceList is an autogenerated conversion function. +func Convert_core_ServiceList_To_v1_ServiceList(in *core.ServiceList, out *v1.ServiceList, s conversion.Scope) error { + return autoConvert_core_ServiceList_To_v1_ServiceList(in, out, s) +} + +func autoConvert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = core.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_v1_ServicePort_To_core_ServicePort is an autogenerated conversion function. 
+func Convert_v1_ServicePort_To_core_ServicePort(in *v1.ServicePort, out *core.ServicePort, s conversion.Scope) error { + return autoConvert_v1_ServicePort_To_core_ServicePort(in, out, s) +} + +func autoConvert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + out.Name = in.Name + out.Protocol = v1.Protocol(in.Protocol) + out.Port = in.Port + out.TargetPort = in.TargetPort + out.NodePort = in.NodePort + return nil +} + +// Convert_core_ServicePort_To_v1_ServicePort is an autogenerated conversion function. +func Convert_core_ServicePort_To_v1_ServicePort(in *core.ServicePort, out *v1.ServicePort, s conversion.Scope) error { + return autoConvert_core_ServicePort_To_v1_ServicePort(in, out, s) +} + +func autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions is an autogenerated conversion function. +func Convert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in *v1.ServiceProxyOptions, out *core.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_v1_ServiceProxyOptions_To_core_ServiceProxyOptions(in, out, s) +} + +func autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + out.Path = in.Path + return nil +} + +// Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions is an autogenerated conversion function. +func Convert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in *core.ServiceProxyOptions, out *v1.ServiceProxyOptions, s conversion.Scope) error { + return autoConvert_core_ServiceProxyOptions_To_v1_ServiceProxyOptions(in, out, s) +} + +func autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + out.Ports = *(*[]core.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.Type = core.ServiceType(in.Type) + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.SessionAffinity = core.ServiceAffinity(in.SessionAffinity) + out.LoadBalancerIP = in.LoadBalancerIP + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalName = in.ExternalName + out.ExternalTrafficPolicy = core.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + out.SessionAffinityConfig = (*core.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + return nil +} + +// Convert_v1_ServiceSpec_To_core_ServiceSpec is an autogenerated conversion function. 
+func Convert_v1_ServiceSpec_To_core_ServiceSpec(in *v1.ServiceSpec, out *core.ServiceSpec, s conversion.Scope) error { + return autoConvert_v1_ServiceSpec_To_core_ServiceSpec(in, out, s) +} + +func autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + out.Type = v1.ServiceType(in.Type) + out.Ports = *(*[]v1.ServicePort)(unsafe.Pointer(&in.Ports)) + out.Selector = *(*map[string]string)(unsafe.Pointer(&in.Selector)) + out.ClusterIP = in.ClusterIP + out.ExternalName = in.ExternalName + out.ExternalIPs = *(*[]string)(unsafe.Pointer(&in.ExternalIPs)) + out.LoadBalancerIP = in.LoadBalancerIP + out.SessionAffinity = v1.ServiceAffinity(in.SessionAffinity) + out.SessionAffinityConfig = (*v1.SessionAffinityConfig)(unsafe.Pointer(in.SessionAffinityConfig)) + out.LoadBalancerSourceRanges = *(*[]string)(unsafe.Pointer(&in.LoadBalancerSourceRanges)) + out.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(in.ExternalTrafficPolicy) + out.HealthCheckNodePort = in.HealthCheckNodePort + out.PublishNotReadyAddresses = in.PublishNotReadyAddresses + return nil +} + +// Convert_core_ServiceSpec_To_v1_ServiceSpec is an autogenerated conversion function. +func Convert_core_ServiceSpec_To_v1_ServiceSpec(in *core.ServiceSpec, out *v1.ServiceSpec, s conversion.Scope) error { + return autoConvert_core_ServiceSpec_To_v1_ServiceSpec(in, out, s) +} + +func autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + if err := Convert_v1_LoadBalancerStatus_To_core_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_v1_ServiceStatus_To_core_ServiceStatus is an autogenerated conversion function. +func Convert_v1_ServiceStatus_To_core_ServiceStatus(in *v1.ServiceStatus, out *core.ServiceStatus, s conversion.Scope) error { + return autoConvert_v1_ServiceStatus_To_core_ServiceStatus(in, out, s) +} + +func autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + if err := Convert_core_LoadBalancerStatus_To_v1_LoadBalancerStatus(&in.LoadBalancer, &out.LoadBalancer, s); err != nil { + return err + } + return nil +} + +// Convert_core_ServiceStatus_To_v1_ServiceStatus is an autogenerated conversion function. +func Convert_core_ServiceStatus_To_v1_ServiceStatus(in *core.ServiceStatus, out *v1.ServiceStatus, s conversion.Scope) error { + return autoConvert_core_ServiceStatus_To_v1_ServiceStatus(in, out, s) +} + +func autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*core.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig is an autogenerated conversion function. 
+func Convert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in *v1.SessionAffinityConfig, out *core.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_v1_SessionAffinityConfig_To_core_SessionAffinityConfig(in, out, s) +} + +func autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + out.ClientIP = (*v1.ClientIPConfig)(unsafe.Pointer(in.ClientIP)) + return nil +} + +// Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig is an autogenerated conversion function. +func Convert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in *core.SessionAffinityConfig, out *v1.SessionAffinityConfig, s conversion.Scope) error { + return autoConvert_core_SessionAffinityConfig_To_v1_SessionAffinityConfig(in, out, s) +} + +func autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in *v1.StorageOSPersistentVolumeSource, out *core.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSPersistentVolumeSource_To_core_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.ObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in *core.StorageOSPersistentVolumeSource, out *v1.StorageOSPersistentVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSPersistentVolumeSource_To_v1_StorageOSPersistentVolumeSource(in, out, s) +} + +func autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*core.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource is an autogenerated conversion function. 
+func Convert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in *v1.StorageOSVolumeSource, out *core.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_v1_StorageOSVolumeSource_To_core_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + out.VolumeName = in.VolumeName + out.VolumeNamespace = in.VolumeNamespace + out.FSType = in.FSType + out.ReadOnly = in.ReadOnly + out.SecretRef = (*v1.LocalObjectReference)(unsafe.Pointer(in.SecretRef)) + return nil +} + +// Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource is an autogenerated conversion function. +func Convert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in *core.StorageOSVolumeSource, out *v1.StorageOSVolumeSource, s conversion.Scope) error { + return autoConvert_core_StorageOSVolumeSource_To_v1_StorageOSVolumeSource(in, out, s) +} + +func autoConvert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_v1_Sysctl_To_core_Sysctl is an autogenerated conversion function. +func Convert_v1_Sysctl_To_core_Sysctl(in *v1.Sysctl, out *core.Sysctl, s conversion.Scope) error { + return autoConvert_v1_Sysctl_To_core_Sysctl(in, out, s) +} + +func autoConvert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + out.Name = in.Name + out.Value = in.Value + return nil +} + +// Convert_core_Sysctl_To_v1_Sysctl is an autogenerated conversion function. +func Convert_core_Sysctl_To_v1_Sysctl(in *core.Sysctl, out *v1.Sysctl, s conversion.Scope) error { + return autoConvert_core_Sysctl_To_v1_Sysctl(in, out, s) +} + +func autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_v1_TCPSocketAction_To_core_TCPSocketAction is an autogenerated conversion function. +func Convert_v1_TCPSocketAction_To_core_TCPSocketAction(in *v1.TCPSocketAction, out *core.TCPSocketAction, s conversion.Scope) error { + return autoConvert_v1_TCPSocketAction_To_core_TCPSocketAction(in, out, s) +} + +func autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + out.Port = in.Port + out.Host = in.Host + return nil +} + +// Convert_core_TCPSocketAction_To_v1_TCPSocketAction is an autogenerated conversion function. +func Convert_core_TCPSocketAction_To_v1_TCPSocketAction(in *core.TCPSocketAction, out *v1.TCPSocketAction, s conversion.Scope) error { + return autoConvert_core_TCPSocketAction_To_v1_TCPSocketAction(in, out, s) +} + +func autoConvert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_v1_Taint_To_core_Taint is an autogenerated conversion function. 
+func Convert_v1_Taint_To_core_Taint(in *v1.Taint, out *core.Taint, s conversion.Scope) error { + return autoConvert_v1_Taint_To_core_Taint(in, out, s) +} + +func autoConvert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + out.Key = in.Key + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TimeAdded = (*meta_v1.Time)(unsafe.Pointer(in.TimeAdded)) + return nil +} + +// Convert_core_Taint_To_v1_Taint is an autogenerated conversion function. +func Convert_core_Taint_To_v1_Taint(in *core.Taint, out *v1.Taint, s conversion.Scope) error { + return autoConvert_core_Taint_To_v1_Taint(in, out, s) +} + +func autoConvert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = core.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = core.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_v1_Toleration_To_core_Toleration is an autogenerated conversion function. +func Convert_v1_Toleration_To_core_Toleration(in *v1.Toleration, out *core.Toleration, s conversion.Scope) error { + return autoConvert_v1_Toleration_To_core_Toleration(in, out, s) +} + +func autoConvert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + out.Key = in.Key + out.Operator = v1.TolerationOperator(in.Operator) + out.Value = in.Value + out.Effect = v1.TaintEffect(in.Effect) + out.TolerationSeconds = (*int64)(unsafe.Pointer(in.TolerationSeconds)) + return nil +} + +// Convert_core_Toleration_To_v1_Toleration is an autogenerated conversion function. +func Convert_core_Toleration_To_v1_Toleration(in *core.Toleration, out *v1.Toleration, s conversion.Scope) error { + return autoConvert_core_Toleration_To_v1_Toleration(in, out, s) +} + +func autoConvert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_v1_VolumeSource_To_core_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_v1_Volume_To_core_Volume is an autogenerated conversion function. +func Convert_v1_Volume_To_core_Volume(in *v1.Volume, out *core.Volume, s conversion.Scope) error { + return autoConvert_v1_Volume_To_core_Volume(in, out, s) +} + +func autoConvert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + out.Name = in.Name + if err := Convert_core_VolumeSource_To_v1_VolumeSource(&in.VolumeSource, &out.VolumeSource, s); err != nil { + return err + } + return nil +} + +// Convert_core_Volume_To_v1_Volume is an autogenerated conversion function. +func Convert_core_Volume_To_v1_Volume(in *core.Volume, out *v1.Volume, s conversion.Scope) error { + return autoConvert_core_Volume_To_v1_Volume(in, out, s) +} + +func autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_v1_VolumeDevice_To_core_VolumeDevice is an autogenerated conversion function. 
+func Convert_v1_VolumeDevice_To_core_VolumeDevice(in *v1.VolumeDevice, out *core.VolumeDevice, s conversion.Scope) error { + return autoConvert_v1_VolumeDevice_To_core_VolumeDevice(in, out, s) +} + +func autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + out.Name = in.Name + out.DevicePath = in.DevicePath + return nil +} + +// Convert_core_VolumeDevice_To_v1_VolumeDevice is an autogenerated conversion function. +func Convert_core_VolumeDevice_To_v1_VolumeDevice(in *core.VolumeDevice, out *v1.VolumeDevice, s conversion.Scope) error { + return autoConvert_core_VolumeDevice_To_v1_VolumeDevice(in, out, s) +} + +func autoConvert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*core.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_v1_VolumeMount_To_core_VolumeMount is an autogenerated conversion function. +func Convert_v1_VolumeMount_To_core_VolumeMount(in *v1.VolumeMount, out *core.VolumeMount, s conversion.Scope) error { + return autoConvert_v1_VolumeMount_To_core_VolumeMount(in, out, s) +} + +func autoConvert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + out.Name = in.Name + out.ReadOnly = in.ReadOnly + out.MountPath = in.MountPath + out.SubPath = in.SubPath + out.MountPropagation = (*v1.MountPropagationMode)(unsafe.Pointer(in.MountPropagation)) + return nil +} + +// Convert_core_VolumeMount_To_v1_VolumeMount is an autogenerated conversion function. +func Convert_core_VolumeMount_To_v1_VolumeMount(in *core.VolumeMount, out *v1.VolumeMount, s conversion.Scope) error { + return autoConvert_core_VolumeMount_To_v1_VolumeMount(in, out, s) +} + +func autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + out.Secret = (*core.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*core.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*core.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_v1_VolumeProjection_To_core_VolumeProjection is an autogenerated conversion function. +func Convert_v1_VolumeProjection_To_core_VolumeProjection(in *v1.VolumeProjection, out *core.VolumeProjection, s conversion.Scope) error { + return autoConvert_v1_VolumeProjection_To_core_VolumeProjection(in, out, s) +} + +func autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + out.Secret = (*v1.SecretProjection)(unsafe.Pointer(in.Secret)) + out.DownwardAPI = (*v1.DownwardAPIProjection)(unsafe.Pointer(in.DownwardAPI)) + out.ConfigMap = (*v1.ConfigMapProjection)(unsafe.Pointer(in.ConfigMap)) + return nil +} + +// Convert_core_VolumeProjection_To_v1_VolumeProjection is an autogenerated conversion function. 
+func Convert_core_VolumeProjection_To_v1_VolumeProjection(in *core.VolumeProjection, out *v1.VolumeProjection, s conversion.Scope) error { + return autoConvert_core_VolumeProjection_To_v1_VolumeProjection(in, out, s) +} + +func autoConvert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + out.HostPath = (*core.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*core.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*core.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*core.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*core.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*core.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*core.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*core.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*core.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*core.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*core.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.FlexVolume = (*core.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*core.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*core.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*core.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*core.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*core.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*core.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*core.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*core.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.Quobyte = (*core.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.AzureDisk = (*core.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*core.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*core.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*core.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*core.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*core.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_v1_VolumeSource_To_core_VolumeSource is an autogenerated conversion function. 
+func Convert_v1_VolumeSource_To_core_VolumeSource(in *v1.VolumeSource, out *core.VolumeSource, s conversion.Scope) error { + return autoConvert_v1_VolumeSource_To_core_VolumeSource(in, out, s) +} + +func autoConvert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + out.HostPath = (*v1.HostPathVolumeSource)(unsafe.Pointer(in.HostPath)) + out.EmptyDir = (*v1.EmptyDirVolumeSource)(unsafe.Pointer(in.EmptyDir)) + out.GCEPersistentDisk = (*v1.GCEPersistentDiskVolumeSource)(unsafe.Pointer(in.GCEPersistentDisk)) + out.AWSElasticBlockStore = (*v1.AWSElasticBlockStoreVolumeSource)(unsafe.Pointer(in.AWSElasticBlockStore)) + out.GitRepo = (*v1.GitRepoVolumeSource)(unsafe.Pointer(in.GitRepo)) + out.Secret = (*v1.SecretVolumeSource)(unsafe.Pointer(in.Secret)) + out.NFS = (*v1.NFSVolumeSource)(unsafe.Pointer(in.NFS)) + out.ISCSI = (*v1.ISCSIVolumeSource)(unsafe.Pointer(in.ISCSI)) + out.Glusterfs = (*v1.GlusterfsVolumeSource)(unsafe.Pointer(in.Glusterfs)) + out.PersistentVolumeClaim = (*v1.PersistentVolumeClaimVolumeSource)(unsafe.Pointer(in.PersistentVolumeClaim)) + out.RBD = (*v1.RBDVolumeSource)(unsafe.Pointer(in.RBD)) + out.Quobyte = (*v1.QuobyteVolumeSource)(unsafe.Pointer(in.Quobyte)) + out.FlexVolume = (*v1.FlexVolumeSource)(unsafe.Pointer(in.FlexVolume)) + out.Cinder = (*v1.CinderVolumeSource)(unsafe.Pointer(in.Cinder)) + out.CephFS = (*v1.CephFSVolumeSource)(unsafe.Pointer(in.CephFS)) + out.Flocker = (*v1.FlockerVolumeSource)(unsafe.Pointer(in.Flocker)) + out.DownwardAPI = (*v1.DownwardAPIVolumeSource)(unsafe.Pointer(in.DownwardAPI)) + out.FC = (*v1.FCVolumeSource)(unsafe.Pointer(in.FC)) + out.AzureFile = (*v1.AzureFileVolumeSource)(unsafe.Pointer(in.AzureFile)) + out.ConfigMap = (*v1.ConfigMapVolumeSource)(unsafe.Pointer(in.ConfigMap)) + out.VsphereVolume = (*v1.VsphereVirtualDiskVolumeSource)(unsafe.Pointer(in.VsphereVolume)) + out.AzureDisk = (*v1.AzureDiskVolumeSource)(unsafe.Pointer(in.AzureDisk)) + out.PhotonPersistentDisk = (*v1.PhotonPersistentDiskVolumeSource)(unsafe.Pointer(in.PhotonPersistentDisk)) + out.Projected = (*v1.ProjectedVolumeSource)(unsafe.Pointer(in.Projected)) + out.PortworxVolume = (*v1.PortworxVolumeSource)(unsafe.Pointer(in.PortworxVolume)) + out.ScaleIO = (*v1.ScaleIOVolumeSource)(unsafe.Pointer(in.ScaleIO)) + out.StorageOS = (*v1.StorageOSVolumeSource)(unsafe.Pointer(in.StorageOS)) + return nil +} + +// Convert_core_VolumeSource_To_v1_VolumeSource is an autogenerated conversion function. +func Convert_core_VolumeSource_To_v1_VolumeSource(in *core.VolumeSource, out *v1.VolumeSource, s conversion.Scope) error { + return autoConvert_core_VolumeSource_To_v1_VolumeSource(in, out, s) +} + +func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. 
+func Convert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in *v1.VsphereVirtualDiskVolumeSource, out *core.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_core_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + out.VolumePath = in.VolumePath + out.FSType = in.FSType + out.StoragePolicyName = in.StoragePolicyName + out.StoragePolicyID = in.StoragePolicyID + return nil +} + +// Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource is an autogenerated conversion function. +func Convert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *core.VsphereVirtualDiskVolumeSource, out *v1.VsphereVirtualDiskVolumeSource, s conversion.Scope) error { + return autoConvert_core_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s) +} + +func autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_v1_PodAffinityTerm_To_core_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm is an autogenerated conversion function. +func Convert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in *v1.WeightedPodAffinityTerm, out *core.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_v1_WeightedPodAffinityTerm_To_core_WeightedPodAffinityTerm(in, out, s) +} + +func autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + out.Weight = in.Weight + if err := Convert_core_PodAffinityTerm_To_v1_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil { + return err + } + return nil +} + +// Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm is an autogenerated conversion function. 
+func Convert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in *core.WeightedPodAffinityTerm, out *v1.WeightedPodAffinityTerm, s conversion.Scope) error { + return autoConvert_core_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go similarity index 97% rename from vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go index 9779d2a1..c1ad46cd 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/v1/zz_generated.defaults.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/v1/zz_generated.defaults.go @@ -46,8 +46,6 @@ func RegisterDefaults(scheme *runtime.Scheme) error { }) scheme.AddTypeDefaultingFunc(&v1.PersistentVolumeList{}, func(obj interface{}) { SetObjectDefaults_PersistentVolumeList(obj.(*v1.PersistentVolumeList)) }) scheme.AddTypeDefaultingFunc(&v1.Pod{}, func(obj interface{}) { SetObjectDefaults_Pod(obj.(*v1.Pod)) }) - scheme.AddTypeDefaultingFunc(&v1.PodAttachOptions{}, func(obj interface{}) { SetObjectDefaults_PodAttachOptions(obj.(*v1.PodAttachOptions)) }) - scheme.AddTypeDefaultingFunc(&v1.PodExecOptions{}, func(obj interface{}) { SetObjectDefaults_PodExecOptions(obj.(*v1.PodExecOptions)) }) scheme.AddTypeDefaultingFunc(&v1.PodList{}, func(obj interface{}) { SetObjectDefaults_PodList(obj.(*v1.PodList)) }) scheme.AddTypeDefaultingFunc(&v1.PodTemplate{}, func(obj interface{}) { SetObjectDefaults_PodTemplate(obj.(*v1.PodTemplate)) }) scheme.AddTypeDefaultingFunc(&v1.PodTemplateList{}, func(obj interface{}) { SetObjectDefaults_PodTemplateList(obj.(*v1.PodTemplateList)) }) @@ -140,7 +138,7 @@ func SetObjectDefaults_PersistentVolume(in *v1.PersistentVolume) { SetDefaults_RBDPersistentVolumeSource(in.Spec.PersistentVolumeSource.RBD) } if in.Spec.PersistentVolumeSource.ISCSI != nil { - SetDefaults_ISCSIVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) + SetDefaults_ISCSIPersistentVolumeSource(in.Spec.PersistentVolumeSource.ISCSI) } if in.Spec.PersistentVolumeSource.AzureDisk != nil { SetDefaults_AzureDiskVolumeSource(in.Spec.PersistentVolumeSource.AzureDisk) @@ -308,14 +306,6 @@ func SetObjectDefaults_Pod(in *v1.Pod) { } } -func SetObjectDefaults_PodAttachOptions(in *v1.PodAttachOptions) { - SetDefaults_PodAttachOptions(in) -} - -func SetObjectDefaults_PodExecOptions(in *v1.PodExecOptions) { - SetDefaults_PodExecOptions(in) -} - func SetObjectDefaults_PodList(in *v1.PodList) { for i := range in.Items { a := &in.Items[i] diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go similarity index 90% rename from vendor/k8s.io/kubernetes/pkg/api/validation/doc.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go index 30f541de..0c1cfaab 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/doc.go @@ -16,4 +16,4 @@ limitations under the License. // Package validation has functions for validating the correctness of api // objects and explaining what is wrong with them when they aren't valid. 
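The conversion functions above are thin wrappers: each exported Convert_* delegates to an autoConvert_* body emitted by conversion-gen, copying fields between the versioned (v1) and internal (core) types. As an illustration only (not part of the patch), here is a minimal sketch of calling one of these converters directly. The import paths assume the standard Kubernetes packages vendored in this tree, and passing nil for conversion.Scope relies on this particular converter never consulting its scope argument.

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	core "k8s.io/kubernetes/pkg/apis/core"
	apicorev1 "k8s.io/kubernetes/pkg/apis/core/v1"
)

func main() {
	// External (versioned) representation of a service port.
	in := &v1.ServicePort{Name: "http", Protocol: v1.ProtocolTCP, Port: 80}

	// Internal representation that the generated converter fills in.
	out := &core.ServicePort{}

	// nil is passed for conversion.Scope because this generated converter
	// copies fields directly and never uses the scope.
	if err := apicorev1.Convert_v1_ServicePort_To_core_ServicePort(in, out, nil); err != nil {
		fmt.Println("conversion failed:", err)
		return
	}
	fmt.Printf("internal port: %s/%d (%s)\n", out.Name, out.Port, out.Protocol)
}
```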
-package validation // import "k8s.io/kubernetes/pkg/api/validation" +package validation // import "k8s.io/kubernetes/pkg/apis/core/validation" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go new file mode 100644 index 00000000..ab265bd7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/events.go @@ -0,0 +1,129 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "time" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/core" +) + +const ( + ReportingInstanceLengthLimit = 128 + ActionLengthLimit = 128 + ReasonLengthLimit = 128 + NoteLengthLimit = 1024 +) + +// ValidateEvent makes sure that the event makes sense. +func ValidateEvent(event *core.Event) field.ErrorList { + allErrs := field.ErrorList{} + // Because go + zeroTime := time.Time{} + + // "New" Events need to have EventTime set, so it's validating old object. + if event.EventTime.Time == zeroTime { + // Make sure event.Namespace and the involvedInvolvedObject.Namespace agree + if len(event.InvolvedObject.Namespace) == 0 { + // event.Namespace must also be empty (or "default", for compatibility with old clients) + if event.Namespace != metav1.NamespaceNone && event.Namespace != metav1.NamespaceDefault { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } else { + // event namespace must match + if event.Namespace != event.InvolvedObject.Namespace { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + } + + } else { + if len(event.InvolvedObject.Namespace) == 0 && event.Namespace != metav1.NamespaceSystem { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match event.namespace")) + } + if len(event.ReportingController) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reportingController"), "")) + } + for _, msg := range validation.IsQualifiedName(event.ReportingController) { + allErrs = append(allErrs, field.Invalid(field.NewPath("reportingController"), event.ReportingController, msg)) + } + if len(event.ReportingInstance) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reportingInstance"), "")) + } + if len(event.ReportingInstance) > ReportingInstanceLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("repotingIntance"), "", fmt.Sprintf("can have at most %v characters", ReportingInstanceLengthLimit))) + } + if len(event.Action) == 0 { + allErrs = append(allErrs, 
field.Required(field.NewPath("action"), "")) + } + if len(event.Action) > ActionLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("action"), "", fmt.Sprintf("can have at most %v characters", ActionLengthLimit))) + } + if len(event.Reason) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("reason"), "")) + } + if len(event.Reason) > ReasonLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("reason"), "", fmt.Sprintf("can have at most %v characters", ReasonLengthLimit))) + } + if len(event.Message) > NoteLengthLimit { + allErrs = append(allErrs, field.Invalid(field.NewPath("message"), "", fmt.Sprintf("can have at most %v characters", NoteLengthLimit))) + } + } + + // For kinds we recognize, make sure InvolvedObject.Namespace is set for namespaced kinds + if namespaced, err := isNamespacedKind(event.InvolvedObject.Kind, event.InvolvedObject.APIVersion); err == nil { + if namespaced && len(event.InvolvedObject.Namespace) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("involvedObject", "namespace"), fmt.Sprintf("required for kind %s", event.InvolvedObject.Kind))) + } + if !namespaced && len(event.InvolvedObject.Namespace) > 0 { + allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, fmt.Sprintf("not allowed for kind %s", event.InvolvedObject.Kind))) + } + } + + for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) { + allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg)) + } + return allErrs +} + +// Check whether the kind in groupVersion is scoped at the root of the api hierarchy +func isNamespacedKind(kind, groupVersion string) (bool, error) { + gv, err := schema.ParseGroupVersion(groupVersion) + if err != nil { + return false, err + } + g, err := legacyscheme.Registry.Group(gv.Group) + if err != nil { + return false, err + } + + restMapping, err := g.RESTMapper.RESTMapping(schema.GroupKind{Group: gv.Group, Kind: kind}, gv.Version) + if err != nil { + return false, err + } + scopeName := restMapping.Scope.Name() + if scopeName == meta.RESTScopeNameNamespace { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go similarity index 79% rename from vendor/k8s.io/kubernetes/pkg/api/validation/validation.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go index 6ab51788..655d0bf2 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/validation/validation.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/validation/validation.go @@ -33,7 +33,6 @@ import ( apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/resource" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" - genericvalidation "k8s.io/apimachinery/pkg/api/validation" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" "k8s.io/apimachinery/pkg/labels" @@ -43,31 +42,33 @@ import ( "k8s.io/apimachinery/pkg/util/validation" "k8s.io/apimachinery/pkg/util/validation/field" utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" - "k8s.io/kubernetes/pkg/api/legacyscheme" apiservice "k8s.io/kubernetes/pkg/api/service" - k8s_api_v1 "k8s.io/kubernetes/pkg/api/v1" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + "k8s.io/kubernetes/pkg/apis/core" + 
"k8s.io/kubernetes/pkg/apis/core/helper" + podshelper "k8s.io/kubernetes/pkg/apis/core/pods" + corev1 "k8s.io/kubernetes/pkg/apis/core/v1" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/capabilities" "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/pkg/fieldpath" "k8s.io/kubernetes/pkg/security/apparmor" ) // TODO: delete this global variable when we enable the validation of common // fields by default. -var RepairMalformedUpdates bool = genericvalidation.RepairMalformedUpdates +var RepairMalformedUpdates bool = apimachineryvalidation.RepairMalformedUpdates const isNegativeErrorMsg string = apimachineryvalidation.IsNegativeErrorMsg const isInvalidQuotaResource string = `must be a standard resource for quota` -const fieldImmutableErrorMsg string = genericvalidation.FieldImmutableErrorMsg +const fieldImmutableErrorMsg string = apimachineryvalidation.FieldImmutableErrorMsg const isNotIntegerErrorMsg string = `must be an integer` +const isNotPositiveErrorMsg string = `must be greater than zero` var pdPartitionErrorMsg string = validation.InclusiveRangeError(1, 255) -var volumeModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" +var fileModeErrorMsg string = "must be a number between 0 and 0777 (octal), both inclusive" // BannedOwners is a black list of object that are not allowed to be owners. -var BannedOwners = genericvalidation.BannedOwners +var BannedOwners = apimachineryvalidation.BannedOwners var iscsiInitiatorIqnRegex = regexp.MustCompile(`iqn\.\d{4}-\d{2}\.([[:alnum:]-.]+)(:[^,;*&$|\s]+)$`) var iscsiInitiatorEuiRegex = regexp.MustCompile(`^eui.[[:alnum:]]{16}$`) @@ -91,7 +92,7 @@ func ValidateHasLabel(meta metav1.ObjectMeta, fldPath *field.Path, key, expected // ValidateAnnotations validates that a set of annotations are correctly defined. func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateAnnotations(annotations, fldPath) + return apimachineryvalidation.ValidateAnnotations(annotations, fldPath) } func ValidateDNS1123Label(value string, fldPath *field.Path) field.ErrorList { @@ -111,37 +112,37 @@ func ValidateDNS1123Subdomain(value string, fldPath *field.Path) field.ErrorList return allErrs } -func ValidatePodSpecificAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { +func ValidatePodSpecificAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if value, isMirror := annotations[api.MirrorPodAnnotationKey]; isMirror { + if value, isMirror := annotations[core.MirrorPodAnnotationKey]; isMirror { if len(spec.NodeName) == 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.MirrorPodAnnotationKey), value, "must set spec.nodeName if mirror pod annotation is set")) } } - if annotations[api.TolerationsAnnotationKey] != "" { + if annotations[core.TolerationsAnnotationKey] != "" { allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...) } allErrs = append(allErrs, ValidateSeccompPodAnnotations(annotations, fldPath)...) allErrs = append(allErrs, ValidateAppArmorPodAnnotations(annotations, spec, fldPath)...) 
- sysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.SysctlsPodAnnotationKey]) + sysctls, err := helper.SysctlsFromPodAnnotation(annotations[core.SysctlsPodAnnotationKey]) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.SysctlsPodAnnotationKey), annotations[api.SysctlsPodAnnotationKey], err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.SysctlsPodAnnotationKey), annotations[core.SysctlsPodAnnotationKey], err.Error())) } else { - allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(api.SysctlsPodAnnotationKey))...) + allErrs = append(allErrs, validateSysctls(sysctls, fldPath.Key(core.SysctlsPodAnnotationKey))...) } - unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[api.UnsafeSysctlsPodAnnotationKey]) + unsafeSysctls, err := helper.SysctlsFromPodAnnotation(annotations[core.UnsafeSysctlsPodAnnotationKey]) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), annotations[api.UnsafeSysctlsPodAnnotationKey], err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.UnsafeSysctlsPodAnnotationKey), annotations[core.UnsafeSysctlsPodAnnotationKey], err.Error())) } else { - allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(api.UnsafeSysctlsPodAnnotationKey))...) + allErrs = append(allErrs, validateSysctls(unsafeSysctls, fldPath.Key(core.UnsafeSysctlsPodAnnotationKey))...) } inBoth := sysctlIntersection(sysctls, unsafeSysctls) if len(inBoth) > 0 { - allErrs = append(allErrs, field.Invalid(fldPath.Key(api.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) + allErrs = append(allErrs, field.Invalid(fldPath.Key(core.UnsafeSysctlsPodAnnotationKey), strings.Join(inBoth, ", "), "can not be safe and unsafe")) } return allErrs @@ -153,18 +154,18 @@ func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath tolerations, err := helper.GetTolerationsFromPodAnnotations(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath, core.TolerationsAnnotationKey, err.Error())) return allErrs } if len(tolerations) > 0 { - allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...) + allErrs = append(allErrs, ValidateTolerations(tolerations, fldPath.Child(core.TolerationsAnnotationKey))...) 
} return allErrs } -func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *field.Path) field.ErrorList { +func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *core.Pod, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} newAnnotations := newPod.Annotations oldAnnotations := oldPod.Annotations @@ -175,7 +176,7 @@ func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *fiel if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update AppArmor annotations")) } - if k == api.MirrorPodAnnotationKey { + if k == core.MirrorPodAnnotationKey { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not remove or update mirror pod annotation")) } } @@ -187,7 +188,7 @@ func ValidatePodSpecificAnnotationUpdates(newPod, oldPod *api.Pod, fldPath *fiel if strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add AppArmor annotations")) } - if k == api.MirrorPodAnnotationKey { + if k == core.MirrorPodAnnotationKey { allErrs = append(allErrs, field.Forbidden(fldPath.Key(k), "may not add mirror pod annotation")) } } @@ -201,7 +202,7 @@ func ValidateEndpointsSpecificAnnotations(annotations map[string]string, fldPath } func ValidateOwnerReferences(ownerReferences []metav1.OwnerReference, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateOwnerReferences(ownerReferences, fldPath) + return apimachineryvalidation.ValidateOwnerReferences(ownerReferences, fldPath) } // ValidateNameFunc validates that the provided name is valid for a given resource type. @@ -273,7 +274,7 @@ var ValidateServiceAccountName = apimachineryvalidation.ValidateServiceAccountNa var ValidateEndpointsName = NameIsDNSSubdomain // ValidateClusterName can be used to check whether the given cluster name is valid. -var ValidateClusterName = genericvalidation.ValidateClusterName +var ValidateClusterName = apimachineryvalidation.ValidateClusterName // ValidateClassName can be used to check whether the given class name is valid. // It is defined here to avoid import cycle between pkg/apis/storage/validation @@ -284,7 +285,7 @@ var ValidateClassName = NameIsDNSSubdomain // class name is valid. var ValidatePriorityClassName = NameIsDNSSubdomain -// TODO update all references to these functions to point to the genericvalidation ones +// TODO update all references to these functions to point to the apimachineryvalidation ones // NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain. 
func NameIsDNSSubdomain(name string, prefix bool) []string { return apimachineryvalidation.NameIsDNSSubdomain(name, prefix) @@ -314,8 +315,17 @@ func ValidateNonnegativeQuantity(value resource.Quantity, fldPath *field.Path) f return allErrs } +// Validates that a Quantity is positive +func ValidatePositiveQuantityValue(value resource.Quantity, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if value.Cmp(resource.Quantity{}) <= 0 { + allErrs = append(allErrs, field.Invalid(fldPath, value.String(), isNotPositiveErrorMsg)) + } + return allErrs +} + func ValidateImmutableField(newVal, oldVal interface{}, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateImmutableField(newVal, oldVal, fldPath) + return apimachineryvalidation.ValidateImmutableField(newVal, oldVal, fldPath) } func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string, fldPath *field.Path) field.ErrorList { @@ -332,7 +342,7 @@ func ValidateImmutableAnnotation(newVal string, oldVal string, annotation string // It doesn't return an error for rootscoped resources with namespace, because namespace should already be cleared before. // TODO: Remove calls to this method scattered in validations of specific resources, e.g., ValidatePodUpdate. func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn ValidateNameFunc, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) + allErrs := apimachineryvalidation.ValidateObjectMeta(meta, requiresNamespace, apimachineryvalidation.ValidateNameFunc(nameFn), fldPath) // run additional checks for the finalizer name for i := range meta.Finalizers { allErrs = append(allErrs, validateKubeFinalizerName(string(meta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) @@ -342,7 +352,7 @@ func ValidateObjectMeta(meta *metav1.ObjectMeta, requiresNamespace bool, nameFn // ValidateObjectMetaUpdate validates an object's metadata when updated func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) + allErrs := apimachineryvalidation.ValidateObjectMetaUpdate(newMeta, oldMeta, fldPath) // run additional checks for the finalizer name for i := range newMeta.Finalizers { allErrs = append(allErrs, validateKubeFinalizerName(string(newMeta.Finalizers[i]), fldPath.Child("finalizers").Index(i))...) 
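The new ValidatePositiveQuantityValue helper added in the hunk above rejects zero as well as negative quantities, whereas ValidateNonnegativeQuantity only rejects negatives. A small sketch (editorial illustration, not part of the patch) of the comparison it relies on; resource.MustParse and Quantity.Cmp come from k8s.io/apimachinery/pkg/api/resource.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	zero := resource.MustParse("0")
	oneGi := resource.MustParse("1Gi")

	// Cmp against the zero-value Quantity: <= 0 means "not strictly positive",
	// which is exactly the condition ValidatePositiveQuantityValue flags.
	fmt.Println(zero.Cmp(resource.Quantity{}) <= 0)  // true  -> "must be greater than zero"
	fmt.Println(oneGi.Cmp(resource.Quantity{}) <= 0) // false -> accepted
}
```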
@@ -352,13 +362,14 @@ func ValidateObjectMetaUpdate(newMeta, oldMeta *metav1.ObjectMeta, fldPath *fiel } func ValidateNoNewFinalizers(newFinalizers []string, oldFinalizers []string, fldPath *field.Path) field.ErrorList { - return genericvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath) + return apimachineryvalidation.ValidateNoNewFinalizers(newFinalizers, oldFinalizers, fldPath) } -func ValidateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, field.ErrorList) { +func ValidateVolumes(volumes []core.Volume, fldPath *field.Path) (map[string]core.VolumeSource, field.ErrorList) { allErrs := field.ErrorList{} allNames := sets.String{} + vols := make(map[string]core.VolumeSource) for i, vol := range volumes { idxPath := fldPath.Index(i) namePath := idxPath.Child("name") @@ -373,15 +384,72 @@ func ValidateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, fi } if len(el) == 0 { allNames.Insert(vol.Name) + vols[vol.Name] = vol.VolumeSource } else { allErrs = append(allErrs, el...) } } - return allNames, allErrs + return vols, allErrs } -func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { +func IsMatchedVolume(name string, volumes map[string]core.VolumeSource) bool { + if _, ok := volumes[name]; ok { + return true + } else { + return false + } +} + +func isMatchedDevice(name string, volumes map[string]core.VolumeSource) (bool, bool) { + if source, ok := volumes[name]; ok { + if source.PersistentVolumeClaim != nil { + return true, true + } else { + return true, false + } + } else { + return false, false + } +} + +func mountNameAlreadyExists(name string, devices map[string]string) bool { + if _, ok := devices[name]; ok { + return true + } else { + return false + } +} + +func mountPathAlreadyExists(mountPath string, devices map[string]string) bool { + for _, devPath := range devices { + if mountPath == devPath { + return true + } + } + + return false +} + +func deviceNameAlreadyExists(name string, mounts map[string]string) bool { + if _, ok := mounts[name]; ok { + return true + } else { + return false + } +} + +func devicePathAlreadyExists(devicePath string, mounts map[string]string) bool { + for _, mountPath := range mounts { + if mountPath == devicePath { + return true + } + } + + return false +} + +func validateVolumeSource(source *core.VolumeSource, fldPath *field.Path, volName string) field.ErrorList { numVolumes := 0 allErrs := field.ErrorList{} if source.EmptyDir != nil { @@ -395,7 +463,7 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("sizeLimit"), "SizeLimit field must be a valid resource quantity")) } } - if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == api.StorageMediumHugePages { + if !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) && source.EmptyDir.Medium == core.StorageMediumHugePages { allErrs = append(allErrs, field.Forbidden(fldPath.Child("emptyDir").Child("medium"), "HugePages medium is disabled by feature-gate for EmptyDir volumes")) } } @@ -621,7 +689,7 @@ func validateVolumeSource(source *api.VolumeSource, fldPath *field.Path, volName return allErrs } -func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { +func validateHostPathVolumeSource(hostPath *core.HostPathVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if 
len(hostPath.Path) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) @@ -633,7 +701,7 @@ func validateHostPathVolumeSource(hostPath *api.HostPathVolumeSource, fldPath *f return allErrs } -func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { +func validateGitRepoVolumeSource(gitRepo *core.GitRepoVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(gitRepo.Repository) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("repository"), "")) @@ -644,7 +712,47 @@ func validateGitRepoVolumeSource(gitRepo *api.GitRepoVolumeSource, fldPath *fiel return allErrs } -func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { +func validateISCSIVolumeSource(iscsi *core.ISCSIVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + if len(iscsi.TargetPortal) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("targetPortal"), "")) + } + if len(iscsi.IQN) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("iqn"), "")) + } else { + if !strings.HasPrefix(iscsi.IQN, "iqn") && !strings.HasPrefix(iscsi.IQN, "eui") && !strings.HasPrefix(iscsi.IQN, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format starting with iqn, eui, or naa")) + } else if strings.HasPrefix(iscsi.IQN, "iqn") && !iscsiInitiatorIqnRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "eui") && !iscsiInitiatorEuiRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } else if strings.HasPrefix(iscsi.IQN, "naa") && !iscsiInitiatorNaaRegex.MatchString(iscsi.IQN) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("iqn"), iscsi.IQN, "must be valid format")) + } + } + if iscsi.Lun < 0 || iscsi.Lun > 255 { + allErrs = append(allErrs, field.Invalid(fldPath.Child("lun"), iscsi.Lun, validation.InclusiveRangeError(0, 255))) + } + if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) + } + if iscsi.InitiatorName != nil { + initiator := *iscsi.InitiatorName + if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format starting with iqn, eui, or naa")) + } + if strings.HasPrefix(initiator, "iqn") && !iscsiInitiatorIqnRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "eui") && !iscsiInitiatorEuiRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } else if strings.HasPrefix(initiator, "naa") && !iscsiInitiatorNaaRegex.MatchString(initiator) { + allErrs = append(allErrs, field.Invalid(fldPath.Child("initiatorname"), initiator, "must be valid format")) + } + } + return allErrs +} + +func validateISCSIPersistentVolumeSource(iscsi *core.ISCSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(iscsi.TargetPortal) == 0 { allErrs = append(allErrs, 
field.Required(fldPath.Child("targetPortal"), "")) @@ -668,6 +776,11 @@ func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path if (iscsi.DiscoveryCHAPAuth || iscsi.SessionCHAPAuth) && iscsi.SecretRef == nil { allErrs = append(allErrs, field.Required(fldPath.Child("secretRef"), "")) } + if iscsi.SecretRef != nil { + if len(iscsi.SecretRef.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("secretRef", "name"), "")) + } + } if iscsi.InitiatorName != nil { initiator := *iscsi.InitiatorName if !strings.HasPrefix(initiator, "iqn") && !strings.HasPrefix(initiator, "eui") && !strings.HasPrefix(initiator, "naa") { @@ -684,7 +797,7 @@ func validateISCSIVolumeSource(iscsi *api.ISCSIVolumeSource, fldPath *field.Path return allErrs } -func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.ErrorList { +func validateFCVolumeSource(fc *core.FCVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fc.TargetWWNs) < 1 && len(fc.WWIDs) < 1 { allErrs = append(allErrs, field.Required(fldPath.Child("targetWWNs"), "must specify either targetWWNs or wwids, but not both")) @@ -706,7 +819,7 @@ func validateFCVolumeSource(fc *api.FCVolumeSource, fldPath *field.Path) field.E return allErrs } -func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { +func validateGCEPersistentDiskVolumeSource(pd *core.GCEPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(pd.PDName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("pdName"), "")) @@ -717,7 +830,7 @@ func validateGCEPersistentDiskVolumeSource(pd *api.GCEPersistentDiskVolumeSource return allErrs } -func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { +func validateAWSElasticBlockStoreVolumeSource(PD *core.AWSElasticBlockStoreVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(PD.VolumeID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) @@ -728,7 +841,7 @@ func validateAWSElasticBlockStoreVolumeSource(PD *api.AWSElasticBlockStoreVolume return allErrs } -func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *field.Path) field.ErrorList { +func validateSecretVolumeSource(secretSource *core.SecretVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(secretSource.SecretName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) @@ -736,7 +849,7 @@ func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *f secretMode := secretSource.DefaultMode if secretMode != nil && (*secretMode > 0777 || *secretMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *secretMode, fileModeErrorMsg)) } itemsPath := fldPath.Child("items") @@ -747,7 +860,7 @@ func validateSecretVolumeSource(secretSource *api.SecretVolumeSource, fldPath *f return allErrs } -func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { +func validateConfigMapVolumeSource(configMapSource *core.ConfigMapVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(configMapSource.Name) == 0 { allErrs = append(allErrs, 
field.Required(fldPath.Child("name"), "")) @@ -755,7 +868,7 @@ func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, f configMapMode := configMapSource.DefaultMode if configMapMode != nil && (*configMapMode > 0777 || *configMapMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *configMapMode, fileModeErrorMsg)) } itemsPath := fldPath.Child("items") @@ -766,7 +879,7 @@ func validateConfigMapVolumeSource(configMapSource *api.ConfigMapVolumeSource, f return allErrs } -func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { +func validateKeyToPath(kp *core.KeyToPath, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(kp.Key) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("key"), "")) @@ -776,13 +889,13 @@ func validateKeyToPath(kp *api.KeyToPath, fldPath *field.Path) field.ErrorList { } allErrs = append(allErrs, validateLocalNonReservedPath(kp.Path, fldPath.Child("path"))...) if kp.Mode != nil && (*kp.Mode > 0777 || *kp.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *kp.Mode, fileModeErrorMsg)) } return allErrs } -func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { +func validatePersistentClaimVolumeSource(claim *core.PersistentVolumeClaimVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(claim.ClaimName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("claimName"), "")) @@ -790,7 +903,7 @@ func validatePersistentClaimVolumeSource(claim *api.PersistentVolumeClaimVolumeS return allErrs } -func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) field.ErrorList { +func validateNFSVolumeSource(nfs *core.NFSVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(nfs.Server) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("server"), "")) @@ -804,7 +917,7 @@ func validateNFSVolumeSource(nfs *api.NFSVolumeSource, fldPath *field.Path) fiel return allErrs } -func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { +func validateQuobyteVolumeSource(quobyte *core.QuobyteVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(quobyte.Registry) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("registry"), "must be a host:port pair or multiple pairs separated by commas")) @@ -822,7 +935,7 @@ func validateQuobyteVolumeSource(quobyte *api.QuobyteVolumeSource, fldPath *fiel return allErrs } -func validateGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { +func validateGlusterfsVolumeSource(glusterfs *core.GlusterfsVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(glusterfs.EndpointsName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("endpoints"), "")) @@ -833,7 +946,7 @@ func validateGlusterfsVolumeSource(glusterfs *api.GlusterfsVolumeSource, fldPath return allErrs } -func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { +func validateFlockerVolumeSource(flocker *core.FlockerVolumeSource, fldPath *field.Path) field.ErrorList { 
allErrs := field.ErrorList{} if len(flocker.DatasetName) == 0 && len(flocker.DatasetUUID) == 0 { //TODO: consider adding a RequiredOneOf() error for this and similar cases @@ -848,14 +961,14 @@ func validateFlockerVolumeSource(flocker *api.FlockerVolumeSource, fldPath *fiel return allErrs } -var validDownwardAPIFieldPathExpressions = sets.NewString( +var validVolumeDownwardAPIFieldPathExpressions = sets.NewString( "metadata.name", "metadata.namespace", "metadata.labels", "metadata.annotations", "metadata.uid") -func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { +func validateDownwardAPIVolumeFile(file *core.DownwardAPIVolumeFile, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(file.Path) == 0 { @@ -863,7 +976,7 @@ func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *fie } allErrs = append(allErrs, validateLocalNonReservedPath(file.Path, fldPath.Child("path"))...) if file.FieldRef != nil { - allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) + allErrs = append(allErrs, validateObjectFieldSelector(file.FieldRef, &validVolumeDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) if file.ResourceFieldRef != nil { allErrs = append(allErrs, field.Invalid(fldPath, "resource", "fieldRef and resourceFieldRef can not be specified simultaneously")) } @@ -873,18 +986,18 @@ func validateDownwardAPIVolumeFile(file *api.DownwardAPIVolumeFile, fldPath *fie allErrs = append(allErrs, field.Required(fldPath, "one of fieldRef and resourceFieldRef is required")) } if file.Mode != nil && (*file.Mode > 0777 || *file.Mode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("mode"), *file.Mode, fileModeErrorMsg)) } return allErrs } -func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { +func validateDownwardAPIVolumeSource(downwardAPIVolume *core.DownwardAPIVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} downwardAPIMode := downwardAPIVolume.DefaultMode if downwardAPIMode != nil && (*downwardAPIMode > 0777 || *downwardAPIMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *downwardAPIMode, fileModeErrorMsg)) } for _, file := range downwardAPIVolume.Items { @@ -893,7 +1006,7 @@ func validateDownwardAPIVolumeSource(downwardAPIVolume *api.DownwardAPIVolumeSou return allErrs } -func validateProjectionSources(projection *api.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { +func validateProjectionSources(projection *core.ProjectedVolumeSource, projectionMode *int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allPaths := sets.String{} @@ -969,12 +1082,12 @@ func validateProjectionSources(projection *api.ProjectedVolumeSource, projection return allErrs } -func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { +func validateProjectedVolumeSource(projection *core.ProjectedVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} projectionMode := projection.DefaultMode if projectionMode != nil && (*projectionMode > 0777 || 
*projectionMode < 0) { - allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, volumeModeErrorMsg)) + allErrs = append(allErrs, field.Invalid(fldPath.Child("defaultMode"), *projectionMode, fileModeErrorMsg)) } allErrs = append(allErrs, validateProjectionSources(projection, projectionMode, fldPath)...) @@ -982,16 +1095,16 @@ func validateProjectedVolumeSource(projection *api.ProjectedVolumeSource, fldPat } var supportedHostPathTypes = sets.NewString( - string(api.HostPathUnset), - string(api.HostPathDirectoryOrCreate), - string(api.HostPathDirectory), - string(api.HostPathFileOrCreate), - string(api.HostPathFile), - string(api.HostPathSocket), - string(api.HostPathCharDev), - string(api.HostPathBlockDev)) + string(core.HostPathUnset), + string(core.HostPathDirectoryOrCreate), + string(core.HostPathDirectory), + string(core.HostPathFileOrCreate), + string(core.HostPathFile), + string(core.HostPathSocket), + string(core.HostPathCharDev), + string(core.HostPathBlockDev)) -func validateHostPathType(hostPathType *api.HostPathType, fldPath *field.Path) field.ErrorList { +func validateHostPathType(hostPathType *core.HostPathType, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if hostPathType != nil && !supportedHostPathTypes.Has(string(*hostPathType)) { @@ -1033,7 +1146,7 @@ func validatePathNoBacksteps(targetPath string, fldPath *field.Path) field.Error // validateMountPropagation verifies that MountPropagation field is valid and // allowed for given container. -func validateMountPropagation(mountPropagation *api.MountPropagationMode, container *api.Container, fldPath *field.Path) field.ErrorList { +func validateMountPropagation(mountPropagation *core.MountPropagationMode, container *core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if mountPropagation == nil { @@ -1044,7 +1157,7 @@ func validateMountPropagation(mountPropagation *api.MountPropagationMode, contai return allErrs } - supportedMountPropagations := sets.NewString(string(api.MountPropagationBidirectional), string(api.MountPropagationHostToContainer)) + supportedMountPropagations := sets.NewString(string(core.MountPropagationBidirectional), string(core.MountPropagationHostToContainer)) if !supportedMountPropagations.Has(string(*mountPropagation)) { allErrs = append(allErrs, field.NotSupported(fldPath, *mountPropagation, supportedMountPropagations.List())) } @@ -1057,7 +1170,7 @@ func validateMountPropagation(mountPropagation *api.MountPropagationMode, contai } privileged := container.SecurityContext != nil && container.SecurityContext.Privileged != nil && *container.SecurityContext.Privileged - if *mountPropagation == api.MountPropagationBidirectional && !privileged { + if *mountPropagation == core.MountPropagationBidirectional && !privileged { allErrs = append(allErrs, field.Forbidden(fldPath, "Bidirectional mount propagation is available only to privileged containers")) } return allErrs @@ -1077,7 +1190,7 @@ func validateLocalNonReservedPath(targetPath string, fldPath *field.Path) field. 
return allErrs } -func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) field.ErrorList { +func validateRBDVolumeSource(rbd *core.RBDVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(rbd.CephMonitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1088,7 +1201,7 @@ func validateRBDVolumeSource(rbd *api.RBDVolumeSource, fldPath *field.Path) fiel return allErrs } -func validateRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateRBDPersistentVolumeSource(rbd *core.RBDPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(rbd.CephMonitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1099,7 +1212,7 @@ func validateRBDPersistentVolumeSource(rbd *api.RBDPersistentVolumeSource, fldPa return allErrs } -func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) field.ErrorList { +func validateCinderVolumeSource(cd *core.CinderVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cd.VolumeID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) @@ -1107,7 +1220,7 @@ func validateCinderVolumeSource(cd *api.CinderVolumeSource, fldPath *field.Path) return allErrs } -func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { +func validateCephFSVolumeSource(cephfs *core.CephFSVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cephfs.Monitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1115,7 +1228,7 @@ func validateCephFSVolumeSource(cephfs *api.CephFSVolumeSource, fldPath *field.P return allErrs } -func validateCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateCephFSPersistentVolumeSource(cephfs *core.CephFSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cephfs.Monitors) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("monitors"), "")) @@ -1123,7 +1236,7 @@ func validateCephFSPersistentVolumeSource(cephfs *api.CephFSPersistentVolumeSour return allErrs } -func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) field.ErrorList { +func validateFlexVolumeSource(fv *core.FlexVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fv.Driver) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) @@ -1144,7 +1257,7 @@ func validateFlexVolumeSource(fv *api.FlexVolumeSource, fldPath *field.Path) fie return allErrs } -func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { +func validateAzureFile(azure *core.AzureFileVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if azure.SecretName == "" { allErrs = append(allErrs, field.Required(fldPath.Child("secretName"), "")) @@ -1155,7 +1268,7 @@ func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) fi return allErrs } -func validateAzureFilePV(azure *api.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateAzureFilePV(azure *core.AzureFilePersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if azure.SecretName == "" { allErrs = append(allErrs, 
field.Required(fldPath.Child("secretName"), "")) @@ -1171,9 +1284,9 @@ func validateAzureFilePV(azure *api.AzureFilePersistentVolumeSource, fldPath *fi return allErrs } -func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { - var supportedCachingModes = sets.NewString(string(api.AzureDataDiskCachingNone), string(api.AzureDataDiskCachingReadOnly), string(api.AzureDataDiskCachingReadWrite)) - var supportedDiskKinds = sets.NewString(string(api.AzureSharedBlobDisk), string(api.AzureDedicatedBlobDisk), string(api.AzureManagedDisk)) +func validateAzureDisk(azure *core.AzureDiskVolumeSource, fldPath *field.Path) field.ErrorList { + var supportedCachingModes = sets.NewString(string(core.AzureDataDiskCachingNone), string(core.AzureDataDiskCachingReadOnly), string(core.AzureDataDiskCachingReadWrite)) + var supportedDiskKinds = sets.NewString(string(core.AzureSharedBlobDisk), string(core.AzureDedicatedBlobDisk), string(core.AzureManagedDisk)) diskUriSupportedManaged := []string{"/subscriptions/{sub-id}/resourcegroups/{group-name}/providers/microsoft.compute/disks/{disk-id}"} diskUriSupportedblob := []string{"https://{account-name}.blob.core.windows.net/{container-name}/{disk-name}.vhd"} @@ -1196,18 +1309,18 @@ func validateAzureDisk(azure *api.AzureDiskVolumeSource, fldPath *field.Path) fi } // validate that DiskUri is the correct format - if azure.Kind != nil && *azure.Kind == api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { + if azure.Kind != nil && *azure.Kind == core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "/subscriptions/") != 0 { allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedManaged)) } - if azure.Kind != nil && *azure.Kind != api.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { + if azure.Kind != nil && *azure.Kind != core.AzureManagedDisk && strings.Index(azure.DataDiskURI, "https://") != 0 { allErrs = append(allErrs, field.NotSupported(fldPath.Child("diskURI"), azure.DataDiskURI, diskUriSupportedblob)) } return allErrs } -func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { +func validateVsphereVolumeSource(cd *core.VsphereVirtualDiskVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cd.VolumePath) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumePath"), "")) @@ -1215,7 +1328,7 @@ func validateVsphereVolumeSource(cd *api.VsphereVirtualDiskVolumeSource, fldPath return allErrs } -func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { +func validatePhotonPersistentDiskVolumeSource(cd *core.PhotonPersistentDiskVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(cd.PdID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("pdID"), "")) @@ -1223,7 +1336,7 @@ func validatePhotonPersistentDiskVolumeSource(cd *api.PhotonPersistentDiskVolume return allErrs } -func validatePortworxVolumeSource(pwx *api.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { +func validatePortworxVolumeSource(pwx *core.PortworxVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(pwx.VolumeID) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeID"), "")) @@ -1231,7 +1344,7 @@ func validatePortworxVolumeSource(pwx *api.PortworxVolumeSource, fldPath *field. 
return allErrs } -func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { +func validateScaleIOVolumeSource(sio *core.ScaleIOVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if sio.Gateway == "" { allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) @@ -1245,7 +1358,7 @@ func validateScaleIOVolumeSource(sio *api.ScaleIOVolumeSource, fldPath *field.Pa return allErrs } -func validateScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateScaleIOPersistentVolumeSource(sio *core.ScaleIOPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if sio.Gateway == "" { allErrs = append(allErrs, field.Required(fldPath.Child("gateway"), "")) @@ -1259,7 +1372,7 @@ func validateScaleIOPersistentVolumeSource(sio *api.ScaleIOPersistentVolumeSourc return allErrs } -func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) field.ErrorList { +func validateLocalVolumeSource(ls *core.LocalVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ls.Path == "" { allErrs = append(allErrs, field.Required(fldPath.Child("path"), "")) @@ -1270,7 +1383,7 @@ func validateLocalVolumeSource(ls *api.LocalVolumeSource, fldPath *field.Path) f return allErrs } -func validateStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { +func validateStorageOSVolumeSource(storageos *core.StorageOSVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(storageos.VolumeName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) @@ -1288,7 +1401,7 @@ func validateStorageOSVolumeSource(storageos *api.StorageOSVolumeSource, fldPath return allErrs } -func validateStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { +func validateStorageOSPersistentVolumeSource(storageos *core.StorageOSPersistentVolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(storageos.VolumeName) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("volumeName"), "")) @@ -1309,15 +1422,35 @@ func validateStorageOSPersistentVolumeSource(storageos *api.StorageOSPersistentV return allErrs } +func validateCSIPersistentVolumeSource(csi *core.CSIPersistentVolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + if !utilfeature.DefaultFeatureGate.Enabled(features.CSIPersistentVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath, "CSIPersistentVolume disabled by feature-gate")) + } + + if len(csi.Driver) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("driver"), "")) + } + + if len(csi.VolumeHandle) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("volumeHandle"), "")) + } + + return allErrs +} + // ValidatePersistentVolumeName checks that a name is appropriate for a // PersistentVolumeName object. 
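For reference, the new validateCSIPersistentVolumeSource added in this hunk rejects the source while the CSIPersistentVolume feature gate is off and otherwise requires both driver and volumeHandle. A minimal standalone sketch of that rule, using stdlib Go only (csiSource and validateCSISketch are illustrative names, not part of the patch):

```
package main

import "fmt"

// csiSource mirrors the two required fields the patch checks on
// core.CSIPersistentVolumeSource; it is illustrative only.
type csiSource struct {
	Driver       string
	VolumeHandle string
}

// validateCSISketch returns the error messages the validation above would
// produce: a feature-gate rejection, plus "required" errors for empty fields.
func validateCSISketch(csi csiSource, gateEnabled bool) []string {
	var errs []string
	if !gateEnabled {
		errs = append(errs, "spec.csi: Forbidden: CSIPersistentVolume disabled by feature-gate")
	}
	if csi.Driver == "" {
		errs = append(errs, "spec.csi.driver: Required value")
	}
	if csi.VolumeHandle == "" {
		errs = append(errs, "spec.csi.volumeHandle: Required value")
	}
	return errs
}

func main() {
	fmt.Println(validateCSISketch(csiSource{Driver: "example.csi.driver"}, true))
	// Output: [spec.csi.volumeHandle: Required value]
}
```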
var ValidatePersistentVolumeName = NameIsDNSSubdomain -var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany)) +var supportedAccessModes = sets.NewString(string(core.ReadWriteOnce), string(core.ReadOnlyMany), string(core.ReadWriteMany)) -var supportedReclaimPolicy = sets.NewString(string(api.PersistentVolumeReclaimDelete), string(api.PersistentVolumeReclaimRecycle), string(api.PersistentVolumeReclaimRetain)) +var supportedReclaimPolicy = sets.NewString(string(core.PersistentVolumeReclaimDelete), string(core.PersistentVolumeReclaimRecycle), string(core.PersistentVolumeReclaimRetain)) -func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { +var supportedVolumeModes = sets.NewString(string(core.PersistentVolumeBlock), string(core.PersistentVolumeFilesystem)) + +func ValidatePersistentVolume(pv *core.PersistentVolume) field.ErrorList { metaPath := field.NewPath("metadata") allErrs := ValidateObjectMeta(&pv.ObjectMeta, false, ValidatePersistentVolumeName, metaPath) @@ -1335,12 +1468,13 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { allErrs = append(allErrs, field.Required(specPath.Child("capacity"), "")) } - if _, ok := pv.Spec.Capacity[api.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { - allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(api.ResourceStorage)})) + if _, ok := pv.Spec.Capacity[core.ResourceStorage]; !ok || len(pv.Spec.Capacity) > 1 { + allErrs = append(allErrs, field.NotSupported(specPath.Child("capacity"), pv.Spec.Capacity, []string{string(core.ResourceStorage)})) } capPath := specPath.Child("capacity") for r, qty := range pv.Spec.Capacity { allErrs = append(allErrs, validateBasicResource(qty, capPath.Key(string(r)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(qty, capPath.Key(string(r)))...) } if len(string(pv.Spec.PersistentVolumeReclaimPolicy)) > 0 { if !supportedReclaimPolicy.Has(string(pv.Spec.PersistentVolumeReclaimPolicy)) { @@ -1429,7 +1563,7 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { allErrs = append(allErrs, field.Forbidden(specPath.Child("iscsi"), "may not specify more than 1 volume type")) } else { numVolumes++ - allErrs = append(allErrs, validateISCSIVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...) + allErrs = append(allErrs, validateISCSIPersistentVolumeSource(pv.Spec.ISCSI, specPath.Child("iscsi"))...) } if pv.Spec.ISCSI.InitiatorName != nil && len(pv.ObjectMeta.Name+":"+pv.Spec.ISCSI.TargetPortal) > 64 { tooLongErr := "Total length of : must be under 64 characters if iscsi.initiatorName is specified." @@ -1531,12 +1665,21 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { } } + if pv.Spec.CSI != nil { + if numVolumes > 0 { + allErrs = append(allErrs, field.Forbidden(specPath.Child("csi"), "may not specify more than 1 volume type")) + } else { + numVolumes++ + allErrs = append(allErrs, validateCSIPersistentVolumeSource(pv.Spec.CSI, specPath.Child("csi"))...) 
+ } + } + if numVolumes == 0 { allErrs = append(allErrs, field.Required(specPath, "must specify a volume type")) } // do not allow hostPath mounts of '/' to have a 'recycle' reclaim policy - if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == api.PersistentVolumeReclaimRecycle { + if pv.Spec.HostPath != nil && path.Clean(pv.Spec.HostPath.Path) == "/" && pv.Spec.PersistentVolumeReclaimPolicy == core.PersistentVolumeReclaimRecycle { allErrs = append(allErrs, field.Forbidden(specPath.Child("persistentVolumeReclaimPolicy"), "may not be 'recycle' for a hostPath mount of '/'")) } @@ -1545,13 +1688,17 @@ func ValidatePersistentVolume(pv *api.PersistentVolume) field.ErrorList { allErrs = append(allErrs, field.Invalid(specPath.Child("storageClassName"), pv.Spec.StorageClassName, msg)) } } - + if pv.Spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(specPath.Child("volumeMode"), "PersistentVolume volumeMode is disabled by feature-gate")) + } else if pv.Spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*pv.Spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(specPath.Child("volumeMode"), *pv.Spec.VolumeMode, supportedVolumeModes.List())) + } return allErrs } // ValidatePersistentVolumeUpdate tests to see if the update is legal for an end user to make. // newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { +func ValidatePersistentVolumeUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { allErrs := field.ErrorList{} allErrs = ValidatePersistentVolume(newPv) @@ -1561,12 +1708,17 @@ func ValidatePersistentVolumeUpdate(newPv, oldPv *api.PersistentVolume) field.Er } newPv.Status = oldPv.Status + + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPv.Spec.VolumeMode, oldPv.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + return allErrs } // ValidatePersistentVolumeStatusUpdate tests to see if the status update is legal for an end user to make. // newPv is updated with fields that cannot be changed. -func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) field.ErrorList { +func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *core.PersistentVolume) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPv.ObjectMeta, &oldPv.ObjectMeta, field.NewPath("metadata")) if len(newPv.ResourceVersion) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) @@ -1576,14 +1728,14 @@ func ValidatePersistentVolumeStatusUpdate(newPv, oldPv *api.PersistentVolume) fi } // ValidatePersistentVolumeClaim validates a PersistentVolumeClaim -func ValidatePersistentVolumeClaim(pvc *api.PersistentVolumeClaim) field.ErrorList { +func ValidatePersistentVolumeClaim(pvc *core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMeta(&pvc.ObjectMeta, true, ValidatePersistentVolumeName, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePersistentVolumeClaimSpec(&pvc.Spec, field.NewPath("spec"))...) 
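The volumeMode handling added here follows the same pattern in ValidatePersistentVolume (and, further down, in ValidatePersistentVolumeClaimSpec): a non-nil volumeMode is forbidden while the BlockVolume gate is off, and otherwise must be one of the two supported modes. A hedged sketch of that decision table; the helper name is invented and an empty string stands in for an unset pointer:

```
package main

import "fmt"

// checkVolumeModeSketch mirrors the gate-then-whitelist pattern used above for
// spec.volumeMode; mode == "" stands in for a nil pointer (field not set).
func checkVolumeModeSketch(mode string, blockVolumeGate bool) string {
	supported := map[string]bool{"Block": true, "Filesystem": true}
	switch {
	case mode == "":
		return "ok (volumeMode not set)"
	case !blockVolumeGate:
		return "Forbidden: volumeMode is disabled by feature-gate"
	case !supported[mode]:
		return fmt.Sprintf("NotSupported: %q, expected one of [Block Filesystem]", mode)
	default:
		return "ok"
	}
}

func main() {
	fmt.Println(checkVolumeModeSketch("Block", false)) // Forbidden while the gate is off
	fmt.Println(checkVolumeModeSketch("Block", true))  // ok
	fmt.Println(checkVolumeModeSketch("block", true))  // NotSupported (case-sensitive)
}
```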
return allErrs } // ValidatePersistentVolumeClaimSpec validates a PersistentVolumeClaimSpec -func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { +func ValidatePersistentVolumeClaimSpec(spec *core.PersistentVolumeClaimSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(spec.AccessModes) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("accessModes"), "at least 1 access mode is required")) @@ -1592,15 +1744,16 @@ func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldP allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(spec.Selector, fldPath.Child("selector"))...) } for _, mode := range spec.AccessModes { - if mode != api.ReadWriteOnce && mode != api.ReadOnlyMany && mode != api.ReadWriteMany { + if mode != core.ReadWriteOnce && mode != core.ReadOnlyMany && mode != core.ReadWriteMany { allErrs = append(allErrs, field.NotSupported(fldPath.Child("accessModes"), mode, supportedAccessModes.List())) } } - storageValue, ok := spec.Resources.Requests[api.ResourceStorage] + storageValue, ok := spec.Resources.Requests[core.ResourceStorage] if !ok { - allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(api.ResourceStorage)), "")) + allErrs = append(allErrs, field.Required(fldPath.Child("resources").Key(string(core.ResourceStorage)), "")) } else { - allErrs = append(allErrs, ValidateResourceQuantityValue(string(api.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(api.ResourceStorage)))...) + allErrs = append(allErrs, ValidateResourceQuantityValue(string(core.ResourceStorage), storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) + allErrs = append(allErrs, ValidatePositiveQuantityValue(storageValue, fldPath.Child("resources").Key(string(core.ResourceStorage)))...) } if spec.StorageClassName != nil && len(*spec.StorageClassName) > 0 { @@ -1608,11 +1761,16 @@ func ValidatePersistentVolumeClaimSpec(spec *api.PersistentVolumeClaimSpec, fldP allErrs = append(allErrs, field.Invalid(fldPath.Child("storageClassName"), *spec.StorageClassName, msg)) } } + if spec.VolumeMode != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeMode"), "PersistentVolumeClaim volumeMode is disabled by feature-gate")) + } else if spec.VolumeMode != nil && !supportedVolumeModes.Has(string(*spec.VolumeMode)) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("volumeMode"), *spec.VolumeMode, supportedVolumeModes.List())) + } return allErrs } // ValidatePersistentVolumeClaimUpdate validates an update to a PersistentVolumeClaim -func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { +func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePersistentVolumeClaim(newPvc)...) // PVController needs to update PVC.Spec w/ VolumeName. @@ -1628,7 +1786,7 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeCla newPVCSpecCopy := newPvc.Spec.DeepCopy() // lets make sure storage values are same. 
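Alongside the existing quantity-format check, PV capacity and the PVC storage request are now also run through ValidatePositiveQuantityValue, which (as the name suggests) rejects quantities that are not strictly positive. A simplified sketch of that intent, with quantities reduced to int64 byte counts rather than resource.Quantity:

```
package main

import "fmt"

// positiveQuantitySketch mirrors the intent of the ValidatePositiveQuantityValue
// calls added above: a PVC storage request (or PV capacity) must be strictly
// greater than zero, not merely non-negative.
func positiveQuantitySketch(bytes int64) error {
	if bytes <= 0 {
		return fmt.Errorf("must be greater than zero, got %d", bytes)
	}
	return nil
}

func main() {
	fmt.Println(positiveQuantitySketch(0))       // rejected
	fmt.Println(positiveQuantitySketch(5 << 30)) // 5Gi: ok (<nil>)
}
```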
- if newPvc.Status.Phase == api.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { + if newPvc.Status.Phase == core.ClaimBound && newPVCSpecCopy.Resources.Requests != nil { newPVCSpecCopy.Resources.Requests["storage"] = oldPvc.Spec.Resources.Requests["storage"] } @@ -1654,12 +1812,16 @@ func ValidatePersistentVolumeClaimUpdate(newPvc, oldPvc *api.PersistentVolumeCla // TODO: remove Beta when no longer needed allErrs = append(allErrs, ValidateImmutableAnnotation(newPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], oldPvc.ObjectMeta.Annotations[v1.BetaStorageClassAnnotation], v1.BetaStorageClassAnnotation, field.NewPath("metadata"))...) + if utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, ValidateImmutableField(newPvc.Spec.VolumeMode, oldPvc.Spec.VolumeMode, field.NewPath("volumeMode"))...) + } + newPvc.Status = oldPvc.Status return allErrs } // ValidatePersistentVolumeClaimStatusUpdate validates an update to status of a PersistentVolumeClaim -func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVolumeClaim) field.ErrorList { +func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *core.PersistentVolumeClaim) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newPvc.ObjectMeta, &oldPvc.ObjectMeta, field.NewPath("metadata")) if len(newPvc.ResourceVersion) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) @@ -1679,9 +1841,9 @@ func ValidatePersistentVolumeClaimStatusUpdate(newPvc, oldPvc *api.PersistentVol return allErrs } -var supportedPortProtocols = sets.NewString(string(api.ProtocolTCP), string(api.ProtocolUDP)) +var supportedPortProtocols = sets.NewString(string(core.ProtocolTCP), string(core.ProtocolUDP)) -func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) field.ErrorList { +func validateContainerPorts(ports []core.ContainerPort, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allNames := sets.String{} @@ -1720,7 +1882,7 @@ func validateContainerPorts(ports []api.ContainerPort, fldPath *field.Path) fiel } // ValidateEnv validates env vars -func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { +func ValidateEnv(vars []core.EnvVar, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ev := range vars { @@ -1737,10 +1899,17 @@ func ValidateEnv(vars []api.EnvVar, fldPath *field.Path) field.ErrorList { return allErrs } -var validFieldPathExpressionsEnv = sets.NewString("metadata.name", "metadata.namespace", "metadata.uid", "spec.nodeName", "spec.serviceAccountName", "status.hostIP", "status.podIP") +var validEnvDownwardAPIFieldPathExpressions = sets.NewString( + "metadata.name", + "metadata.namespace", + "metadata.uid", + "spec.nodeName", + "spec.serviceAccountName", + "status.hostIP", + "status.podIP") var validContainerResourceFieldPathExpressions = sets.NewString("limits.cpu", "limits.memory", "limits.ephemeral-storage", "requests.cpu", "requests.memory", "requests.ephemeral-storage") -func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList { +func validateEnvVarValueFrom(ev core.EnvVar, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if ev.ValueFrom == nil { @@ -1751,7 +1920,7 @@ func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList if ev.ValueFrom.FieldRef != nil { numSources++ - allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validFieldPathExpressionsEnv, 
fldPath.Child("fieldRef"))...) + allErrs = append(allErrs, validateObjectFieldSelector(ev.ValueFrom.FieldRef, &validEnvDownwardAPIFieldPathExpressions, fldPath.Child("fieldRef"))...) } if ev.ValueFrom.ResourceFieldRef != nil { numSources++ @@ -1779,20 +1948,40 @@ func validateEnvVarValueFrom(ev api.EnvVar, fldPath *field.Path) field.ErrorList return allErrs } -func validateObjectFieldSelector(fs *api.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { +func validateObjectFieldSelector(fs *core.ObjectFieldSelector, expressions *sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(fs.APIVersion) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("apiVersion"), "")) - } else if len(fs.FieldPath) == 0 { + return allErrs + } + if len(fs.FieldPath) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("fieldPath"), "")) - } else { - internalFieldPath, _, err := legacyscheme.Scheme.ConvertFieldLabel(fs.APIVersion, "Pod", fs.FieldPath, "") - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) - } else if !expressions.Has(internalFieldPath) { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), internalFieldPath, expressions.List())) + return allErrs + } + + internalFieldPath, _, err := podshelper.ConvertDownwardAPIFieldLabel(fs.APIVersion, fs.FieldPath, "") + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("fieldPath"), fs.FieldPath, fmt.Sprintf("error converting fieldPath: %v", err))) + return allErrs + } + + if path, subscript, ok := fieldpath.SplitMaybeSubscriptedPath(internalFieldPath); ok { + switch path { + case "metadata.annotations": + for _, msg := range validation.IsQualifiedName(strings.ToLower(subscript)) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + case "metadata.labels": + for _, msg := range validation.IsQualifiedName(subscript) { + allErrs = append(allErrs, field.Invalid(fldPath, subscript, msg)) + } + default: + allErrs = append(allErrs, field.Invalid(fldPath, path, "does not support subscript")) } + } else if !expressions.Has(path) { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("fieldPath"), path, expressions.List())) + return allErrs } return allErrs @@ -1805,7 +1994,7 @@ func fsResourceIsEphemeralStorage(resource string) bool { return false } -func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { +func validateContainerResourceFieldSelector(fs *core.ResourceFieldSelector, expressions *sets.String, fldPath *field.Path, volume bool) field.ErrorList { allErrs := field.ErrorList{} if volume && len(fs.ContainerName) == 0 { @@ -1821,7 +2010,7 @@ func validateContainerResourceFieldSelector(fs *api.ResourceFieldSelector, expre return allErrs } -func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorList { +func ValidateEnvFrom(vars []core.EnvFromSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ev := range vars { idxPath := fldPath.Index(i) @@ -1850,7 +2039,7 @@ func ValidateEnvFrom(vars []api.EnvFromSource, fldPath *field.Path) field.ErrorL return allErrs } -func validateConfigMapEnvSource(configMapSource *api.ConfigMapEnvSource, fldPath *field.Path) field.ErrorList { +func validateConfigMapEnvSource(configMapSource *core.ConfigMapEnvSource, fldPath 
*field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(configMapSource.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -1862,7 +2051,7 @@ func validateConfigMapEnvSource(configMapSource *api.ConfigMapEnvSource, fldPath return allErrs } -func validateSecretEnvSource(secretSource *api.SecretEnvSource, fldPath *field.Path) field.ErrorList { +func validateSecretEnvSource(secretSource *core.SecretEnvSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(secretSource.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -1901,7 +2090,7 @@ func validateContainerResourceDivisor(rName string, divisor resource.Quantity, f return allErrs } -func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { +func validateConfigMapKeySelector(s *core.ConfigMapKeySelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} nameFn := ValidateNameFunc(ValidateSecretName) @@ -1919,7 +2108,7 @@ func validateConfigMapKeySelector(s *api.ConfigMapKeySelector, fldPath *field.Pa return allErrs } -func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) field.ErrorList { +func validateSecretKeySelector(s *core.SecretKeySelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} nameFn := ValidateNameFunc(ValidateSecretName) @@ -1937,7 +2126,27 @@ func validateSecretKeySelector(s *api.SecretKeySelector, fldPath *field.Path) fi return allErrs } -func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, container *api.Container, fldPath *field.Path) field.ErrorList { +func GetVolumeMountMap(mounts []core.VolumeMount) map[string]string { + volmounts := make(map[string]string) + + for _, mnt := range mounts { + volmounts[mnt.Name] = mnt.MountPath + } + + return volmounts +} + +func GetVolumeDeviceMap(devices []core.VolumeDevice) map[string]string { + voldevices := make(map[string]string) + + for _, dev := range devices { + voldevices[dev.Name] = dev.DevicePath + } + + return voldevices +} + +func ValidateVolumeMounts(mounts []core.VolumeMount, voldevices map[string]string, volumes map[string]core.VolumeSource, container *core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} mountpoints := sets.NewString() @@ -1945,7 +2154,8 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain idxPath := fldPath.Index(i) if len(mnt.Name) == 0 { allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) - } else if !volumes.Has(mnt.Name) { + } + if !IsMatchedVolume(mnt.Name, volumes) { allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), mnt.Name)) } if len(mnt.MountPath) == 0 { @@ -1954,14 +2164,16 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain if mountpoints.Has(mnt.MountPath) { allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be unique")) } - if !path.IsAbs(mnt.MountPath) { - // also allow windows absolute path - p := mnt.MountPath - if len(p) < 2 || ((p[0] < 'A' || p[0] > 'Z') && (p[0] < 'a' || p[0] > 'z')) || p[1] != ':' { - allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must be an absolute path")) - } - } mountpoints.Insert(mnt.MountPath) + + // check for overlap with VolumeDevice + if mountNameAlreadyExists(mnt.Name, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), mnt.Name, "must not already exist 
in volumeDevices")) + } + if mountPathAlreadyExists(mnt.MountPath, voldevices) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("mountPath"), mnt.MountPath, "must not already exist as a path in volumeDevices")) + } + if len(mnt.SubPath) > 0 { allErrs = append(allErrs, validateLocalDescendingPath(mnt.SubPath, fldPath.Child("subPath"))...) } @@ -1973,7 +2185,61 @@ func ValidateVolumeMounts(mounts []api.VolumeMount, volumes sets.String, contain return allErrs } -func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { +func ValidateVolumeDevices(devices []core.VolumeDevice, volmounts map[string]string, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + devicepath := sets.NewString() + devicename := sets.NewString() + + if devices != nil && !utilfeature.DefaultFeatureGate.Enabled(features.BlockVolume) { + allErrs = append(allErrs, field.Forbidden(fldPath.Child("volumeDevices"), "Container volumeDevices is disabled by feature-gate")) + return allErrs + } + if devices != nil { + for i, dev := range devices { + idxPath := fldPath.Index(i) + devName := dev.Name + devPath := dev.DevicePath + didMatch, isPVC := isMatchedDevice(devName, volumes) + if len(devName) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("name"), "")) + } + if devicename.Has(devName) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must be unique")) + } + // Must be PersistentVolumeClaim volume source + if didMatch && !isPVC { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "can only use volume source type of PersistentVolumeClaim for block mode")) + } + if !didMatch { + allErrs = append(allErrs, field.NotFound(idxPath.Child("name"), devName)) + } + if len(devPath) == 0 { + allErrs = append(allErrs, field.Required(idxPath.Child("devicePath"), "")) + } + if devicepath.Has(devPath) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must be unique")) + } + if len(devPath) > 0 && len(validatePathNoBacksteps(devPath, fldPath.Child("devicePath"))) > 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "can not contain backsteps ('..')")) + } else { + devicepath.Insert(devPath) + } + // check for overlap with VolumeMount + if deviceNameAlreadyExists(devName, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), devName, "must not already exist in volumeMounts")) + } + if devicePathAlreadyExists(devPath, volmounts) { + allErrs = append(allErrs, field.Invalid(idxPath.Child("devicePath"), devPath, "must not already exist as a path in volumeMounts")) + } + if len(devName) > 0 { + devicename.Insert(devName) + } + } + } + return allErrs +} + +func validateProbe(probe *core.Probe, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if probe == nil { @@ -1989,18 +2255,18 @@ func validateProbe(probe *api.Probe, fldPath *field.Path) field.ErrorList { return allErrs } -func validateClientIPAffinityConfig(config *api.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { +func validateClientIPAffinityConfig(config *core.SessionAffinityConfig, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if config == nil { - allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) + allErrs = append(allErrs, field.Required(fldPath, fmt.Sprintf("when session affinity type is %s", 
core.ServiceAffinityClientIP))) return allErrs } if config.ClientIP == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) return allErrs } if config.ClientIP.TimeoutSeconds == nil { - allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", api.ServiceAffinityClientIP))) + allErrs = append(allErrs, field.Required(fldPath.Child("clientIP").Child("timeoutSeconds"), fmt.Sprintf("when session affinity type is %s", core.ServiceAffinityClientIP))) return allErrs } allErrs = append(allErrs, validateAffinityTimeout(config.ClientIP.TimeoutSeconds, fldPath.Child("clientIP").Child("timeoutSeconds"))...) @@ -2010,15 +2276,15 @@ func validateClientIPAffinityConfig(config *api.SessionAffinityConfig, fldPath * func validateAffinityTimeout(timeout *int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if *timeout <= 0 || *timeout > api.MaxClientIPServiceAffinitySeconds { - allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", api.MaxClientIPServiceAffinitySeconds))) + if *timeout <= 0 || *timeout > core.MaxClientIPServiceAffinitySeconds { + allErrs = append(allErrs, field.Invalid(fldPath, timeout, fmt.Sprintf("must be greater than 0 and less than %d", core.MaxClientIPServiceAffinitySeconds))) } return allErrs } // AccumulateUniqueHostPorts extracts each HostPort of each Container, // accumulating the results and returning an error if any ports conflict. -func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { +func AccumulateUniqueHostPorts(containers []core.Container, accumulator *sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for ci, ctr := range containers { @@ -2043,12 +2309,12 @@ func AccumulateUniqueHostPorts(containers []api.Container, accumulator *sets.Str // checkHostPortConflicts checks for colliding Port.HostPort values across // a slice of containers. 
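AccumulateUniqueHostPorts and checkHostPortConflicts (renamed to core types above; their accumulation body is unchanged context and not shown in the hunk) key every non-zero hostPort by port and protocol and flag repeats across containers. A standalone sketch of that pattern, with an invented port type:

```
package main

import "fmt"

// port is an illustrative stand-in for core.ContainerPort.
type port struct {
	HostPort int32
	Protocol string // "TCP" or "UDP"
}

// hostPortConflictsSketch keys each non-zero hostPort by "port/protocol" and
// reports any key seen more than once across the containers.
func hostPortConflictsSketch(containers [][]port) []string {
	seen := map[string]bool{}
	var conflicts []string
	for _, ports := range containers {
		for _, p := range ports {
			if p.HostPort == 0 {
				continue // unset hostPort never conflicts
			}
			key := fmt.Sprintf("%d/%s", p.HostPort, p.Protocol)
			if seen[key] {
				conflicts = append(conflicts, key)
				continue
			}
			seen[key] = true
		}
	}
	return conflicts
}

func main() {
	fmt.Println(hostPortConflictsSketch([][]port{
		{{HostPort: 8080, Protocol: "TCP"}},
		{{HostPort: 8080, Protocol: "TCP"}}, // same port+protocol in another container
	}))
	// Output: [8080/TCP]
}
```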
-func checkHostPortConflicts(containers []api.Container, fldPath *field.Path) field.ErrorList { +func checkHostPortConflicts(containers []core.Container, fldPath *field.Path) field.ErrorList { allPorts := sets.String{} return AccumulateUniqueHostPorts(containers, &allPorts, fldPath) } -func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorList { +func validateExecAction(exec *core.ExecAction, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if len(exec.Command) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("command"), "")) @@ -2056,9 +2322,9 @@ func validateExecAction(exec *api.ExecAction, fldPath *field.Path) field.ErrorLi return allErrors } -var supportedHTTPSchemes = sets.NewString(string(api.URISchemeHTTP), string(api.URISchemeHTTPS)) +var supportedHTTPSchemes = sets.NewString(string(core.URISchemeHTTP), string(core.URISchemeHTTPS)) -func validateHTTPGetAction(http *api.HTTPGetAction, fldPath *field.Path) field.ErrorList { +func validateHTTPGetAction(http *core.HTTPGetAction, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if len(http.Path) == 0 { allErrors = append(allErrors, field.Required(fldPath.Child("path"), "")) @@ -2091,11 +2357,11 @@ func ValidatePortNumOrName(port intstr.IntOrString, fldPath *field.Path) field.E return allErrs } -func validateTCPSocketAction(tcp *api.TCPSocketAction, fldPath *field.Path) field.ErrorList { +func validateTCPSocketAction(tcp *core.TCPSocketAction, fldPath *field.Path) field.ErrorList { return ValidatePortNumOrName(tcp.Port, fldPath.Child("port")) } -func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList { +func validateHandler(handler *core.Handler, fldPath *field.Path) field.ErrorList { numHandlers := 0 allErrors := field.ErrorList{} if handler.Exec != nil { @@ -2128,7 +2394,7 @@ func validateHandler(handler *api.Handler, fldPath *field.Path) field.ErrorList return allErrors } -func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.ErrorList { +func validateLifecycle(lifecycle *core.Lifecycle, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if lifecycle.PostStart != nil { allErrs = append(allErrs, validateHandler(lifecycle.PostStart, fldPath.Child("postStart"))...) 
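validateHandler, used by validateLifecycle and validateProbe above, counts how many of exec, httpGet, and tcpSocket are set and accepts exactly one (the counting body is mostly unchanged context, so only its opening lines appear in the hunk). A hedged sketch of that rule with booleans standing in for the struct pointers:

```
package main

import "fmt"

// handlerSketch mirrors validateHandler: a lifecycle handler (or probe action)
// must set exactly one of exec, httpGet, tcpSocket.
func handlerSketch(hasExec, hasHTTPGet, hasTCPSocket bool) error {
	n := 0
	for _, set := range []bool{hasExec, hasHTTPGet, hasTCPSocket} {
		if set {
			n++
		}
	}
	switch {
	case n == 0:
		return fmt.Errorf("must specify a handler type")
	case n > 1:
		return fmt.Errorf("may not specify more than 1 handler type")
	default:
		return nil
	}
}

func main() {
	fmt.Println(handlerSketch(true, false, false)) // <nil>
	fmt.Println(handlerSketch(true, true, false))  // may not specify more than 1 handler type
}
```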
@@ -2139,13 +2405,13 @@ func validateLifecycle(lifecycle *api.Lifecycle, fldPath *field.Path) field.Erro return allErrs } -var supportedPullPolicies = sets.NewString(string(api.PullAlways), string(api.PullIfNotPresent), string(api.PullNever)) +var supportedPullPolicies = sets.NewString(string(core.PullAlways), string(core.PullIfNotPresent), string(core.PullNever)) -func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorList { +func validatePullPolicy(policy core.PullPolicy, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} switch policy { - case api.PullAlways, api.PullIfNotPresent, api.PullNever: + case core.PullAlways, core.PullIfNotPresent, core.PullNever: break case "": allErrors = append(allErrors, field.Required(fldPath, "")) @@ -2156,10 +2422,10 @@ func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorL return allErrors } -func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { +func validateInitContainers(containers, otherContainers []core.Container, deviceVolumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { var allErrs field.ErrorList if len(containers) > 0 { - allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...) + allErrs = append(allErrs, validateContainers(containers, deviceVolumes, fldPath)...) } allNames := sets.String{} @@ -2187,7 +2453,7 @@ func validateInitContainers(containers, otherContainers []api.Container, volumes return allErrs } -func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList { +func validateContainers(containers []core.Container, volumes map[string]core.VolumeSource, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(containers) == 0 { @@ -2198,6 +2464,9 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath for i, ctr := range containers { idxPath := fldPath.Index(i) namePath := idxPath.Child("name") + volMounts := GetVolumeMountMap(ctr.VolumeMounts) + volDevices := GetVolumeDeviceMap(ctr.VolumeDevices) + if len(ctr.Name) == 0 { allErrs = append(allErrs, field.Required(namePath, "")) } else { @@ -2224,7 +2493,7 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath } switch ctr.TerminationMessagePolicy { - case api.TerminationMessageReadFile, api.TerminationMessageFallbackToLogsOnError: + case core.TerminationMessageReadFile, core.TerminationMessageFallbackToLogsOnError: case "": allErrs = append(allErrs, field.Required(idxPath.Child("terminationMessagePolicy"), "must be 'File' or 'FallbackToLogsOnError'")) default: @@ -2235,7 +2504,8 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath allErrs = append(allErrs, validateContainerPorts(ctr.Ports, idxPath.Child("ports"))...) allErrs = append(allErrs, ValidateEnv(ctr.Env, idxPath.Child("env"))...) allErrs = append(allErrs, ValidateEnvFrom(ctr.EnvFrom, idxPath.Child("envFrom"))...) - allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volumes, &ctr, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, ValidateVolumeMounts(ctr.VolumeMounts, volDevices, volumes, &ctr, idxPath.Child("volumeMounts"))...) + allErrs = append(allErrs, ValidateVolumeDevices(ctr.VolumeDevices, volMounts, volumes, idxPath.Child("volumeDevices"))...) allErrs = append(allErrs, validatePullPolicy(ctr.ImagePullPolicy, idxPath.Child("imagePullPolicy"))...) 
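The wiring added to validateContainers builds per-container maps with GetVolumeMountMap and GetVolumeDeviceMap and passes them crosswise into ValidateVolumeMounts and ValidateVolumeDevices, so a volume name or path may appear on only one side. A standalone sketch of that crosswise check (names are illustrative):

```
package main

import "fmt"

// overlapSketch mirrors the crosswise checks wired up above: a container's
// volumeMounts are validated against its volumeDevices map, so a shared
// volume name or a mountPath reused as a devicePath is rejected.
func overlapSketch(mounts map[string]string, devices map[string]string) []string {
	var errs []string
	devPaths := map[string]bool{}
	for _, p := range devices {
		devPaths[p] = true
	}
	for name, path := range mounts {
		if _, ok := devices[name]; ok {
			errs = append(errs, fmt.Sprintf("volumeMounts[%s]: must not already exist in volumeDevices", name))
		}
		if devPaths[path] {
			errs = append(errs, fmt.Sprintf("mountPath %q: must not already exist as a path in volumeDevices", path))
		}
	}
	return errs
}

func main() {
	mounts := map[string]string{"data": "/var/lib/data"} // name -> mountPath
	devices := map[string]string{"data": "/dev/xvda"}    // name -> devicePath
	fmt.Println(overlapSketch(mounts, devices))          // both overlap errors for "data"
}
```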
allErrs = append(allErrs, ValidateResourceRequirements(&ctr.Resources, idxPath.Child("resources"))...) allErrs = append(allErrs, ValidateSecurityContext(ctr.SecurityContext, idxPath.Child("securityContext"))...) @@ -2246,36 +2516,101 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath return allErrs } -func validateRestartPolicy(restartPolicy *api.RestartPolicy, fldPath *field.Path) field.ErrorList { +func validateRestartPolicy(restartPolicy *core.RestartPolicy, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} switch *restartPolicy { - case api.RestartPolicyAlways, api.RestartPolicyOnFailure, api.RestartPolicyNever: + case core.RestartPolicyAlways, core.RestartPolicyOnFailure, core.RestartPolicyNever: break case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - validValues := []string{string(api.RestartPolicyAlways), string(api.RestartPolicyOnFailure), string(api.RestartPolicyNever)} + validValues := []string{string(core.RestartPolicyAlways), string(core.RestartPolicyOnFailure), string(core.RestartPolicyNever)} allErrors = append(allErrors, field.NotSupported(fldPath, *restartPolicy, validValues)) } return allErrors } -func validateDNSPolicy(dnsPolicy *api.DNSPolicy, fldPath *field.Path) field.ErrorList { +func validateDNSPolicy(dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} switch *dnsPolicy { - case api.DNSClusterFirstWithHostNet, api.DNSClusterFirst, api.DNSDefault: - break + case core.DNSClusterFirstWithHostNet, core.DNSClusterFirst, core.DNSDefault: + case core.DNSNone: + if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + allErrors = append(allErrors, field.Invalid(fldPath, dnsPolicy, "DNSPolicy: can not use 'None', custom pod DNS is disabled by feature gate")) + } case "": allErrors = append(allErrors, field.Required(fldPath, "")) default: - validValues := []string{string(api.DNSClusterFirstWithHostNet), string(api.DNSClusterFirst), string(api.DNSDefault)} + validValues := []string{string(core.DNSClusterFirstWithHostNet), string(core.DNSClusterFirst), string(core.DNSDefault)} + if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + validValues = append(validValues, string(core.DNSNone)) + } allErrors = append(allErrors, field.NotSupported(fldPath, dnsPolicy, validValues)) } return allErrors } -func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath *field.Path) field.ErrorList { +const ( + // Limits on various DNS parameters. These are derived from + // restrictions in Linux libc name resolution handling. + // Max number of DNS name servers. + MaxDNSNameservers = 3 + // Max number of domains in search path. + MaxDNSSearchPaths = 6 + // Max number of characters in search path. + MaxDNSSearchListChars = 256 +) + +func validatePodDNSConfig(dnsConfig *core.PodDNSConfig, dnsPolicy *core.DNSPolicy, fldPath *field.Path) field.ErrorList { + allErrs := field.ErrorList{} + + // Validate DNSNone case. Must provide at least one DNS name server. 
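The DNS constants introduced here cap a pod's dnsConfig at 3 nameservers (each a valid IP), 6 search paths, and a 256-character joined search list; validatePodDNSConfig, which continues in the next hunk lines, enforces them. A stdlib-only sketch of those limits (function name invented for illustration):

```
package main

import (
	"fmt"
	"net"
	"strings"
)

// dnsConfigSketch mirrors the limits introduced above for a pod dnsConfig.
func dnsConfigSketch(nameservers, searches []string) []string {
	var errs []string
	if len(nameservers) > 3 {
		errs = append(errs, "nameservers: must not have more than 3 nameservers")
	}
	for _, ns := range nameservers {
		if net.ParseIP(ns) == nil {
			errs = append(errs, fmt.Sprintf("nameservers: %q must be valid IP address", ns))
		}
	}
	if len(searches) > 6 {
		errs = append(errs, "searches: must not have more than 6 search paths")
	}
	// The joined list length includes the spaces between search paths.
	if len(strings.Join(searches, " ")) > 256 {
		errs = append(errs, "searches: must not have more than 256 characters (including spaces)")
	}
	return errs
}

func main() {
	fmt.Println(dnsConfigSketch([]string{"10.0.0.10", "not-an-ip"}, []string{"svc.cluster.local"}))
}
```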
+ if utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) && dnsPolicy != nil && *dnsPolicy == core.DNSNone { + if dnsConfig == nil { + return append(allErrs, field.Required(fldPath, fmt.Sprintf("must provide `dnsConfig` when `dnsPolicy` is %s", core.DNSNone))) + } + if len(dnsConfig.Nameservers) == 0 { + return append(allErrs, field.Required(fldPath.Child("nameservers"), fmt.Sprintf("must provide at least one DNS nameserver when `dnsPolicy` is %s", core.DNSNone))) + } + } + + if dnsConfig != nil { + if !utilfeature.DefaultFeatureGate.Enabled(features.CustomPodDNS) { + return append(allErrs, field.Forbidden(fldPath, "DNSConfig: custom pod DNS is disabled by feature gate")) + } + + // Validate nameservers. + if len(dnsConfig.Nameservers) > MaxDNSNameservers { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers"), dnsConfig.Nameservers, fmt.Sprintf("must not have more than %v nameservers", MaxDNSNameservers))) + } + for i, ns := range dnsConfig.Nameservers { + if ip := net.ParseIP(ns); ip == nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("nameservers").Index(i), ns, "must be valid IP address")) + } + } + // Validate searches. + if len(dnsConfig.Searches) > MaxDNSSearchPaths { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, fmt.Sprintf("must not have more than %v search paths", MaxDNSSearchPaths))) + } + // Include the space between search paths. + if len(strings.Join(dnsConfig.Searches, " ")) > MaxDNSSearchListChars { + allErrs = append(allErrs, field.Invalid(fldPath.Child("searches"), dnsConfig.Searches, "must not have more than 256 characters (including spaces) in the search list")) + } + for i, search := range dnsConfig.Searches { + allErrs = append(allErrs, ValidateDNS1123Subdomain(search, fldPath.Child("searches").Index(i))...) + } + // Validate options. + for i, option := range dnsConfig.Options { + if len(option.Name) == 0 { + allErrs = append(allErrs, field.Required(fldPath.Child("options").Index(i), "must not be empty")) + } + } + } + return allErrs +} + +func validateHostNetwork(hostNetwork bool, containers []core.Container, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if hostNetwork { for i, container := range containers { @@ -2295,11 +2630,11 @@ func validateHostNetwork(hostNetwork bool, containers []api.Container, fldPath * // formed. Right now, we only expect name to be set (it's the only field). If // this ever changes and someone decides to set those fields, we'd like to // know. 
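validateHostNetwork only changes its parameter types in this hunk; the enforcing lines are unchanged context and not shown. Assuming the upstream behavior of this vintage, the rule is that a container port on a hostNetwork pod must use the same number for containerPort and hostPort. A sketch under that assumption (not taken from the patch text):

```
package main

import "fmt"

// hostNetworkPortsSketch illustrates the assumed hostNetwork rule: with
// hostNetwork set, containerPort and hostPort must match.
func hostNetworkPortsSketch(hostNetwork bool, containerPort, hostPort int32) error {
	if !hostNetwork {
		return nil
	}
	if hostPort != containerPort {
		return fmt.Errorf("containerPort %d must match hostPort %d when hostNetwork is true", containerPort, hostPort)
	}
	return nil
}

func main() {
	fmt.Println(hostNetworkPortsSketch(true, 8080, 80)) // mismatch rejected
	fmt.Println(hostNetworkPortsSketch(true, 80, 80))   // <nil>
}
```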
-func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPath *field.Path) field.ErrorList { +func validateImagePullSecrets(imagePullSecrets []core.LocalObjectReference, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} for i, currPullSecret := range imagePullSecrets { idxPath := fldPath.Index(i) - strippedRef := api.LocalObjectReference{Name: currPullSecret.Name} + strippedRef := core.LocalObjectReference{Name: currPullSecret.Name} if !reflect.DeepEqual(strippedRef, currPullSecret) { allErrors = append(allErrors, field.Invalid(idxPath, currPullSecret, "only name may be set")) } @@ -2308,7 +2643,7 @@ func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPa } // validateAffinity checks if given affinities are valid -func validateAffinity(affinity *api.Affinity, fldPath *field.Path) field.ErrorList { +func validateAffinity(affinity *core.Affinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if affinity != nil { @@ -2337,7 +2672,7 @@ func validateAffinity(affinity *api.Affinity, fldPath *field.Path) field.ErrorLi return allErrs } -func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { +func validateTaintEffect(effect *core.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList { if !allowEmpty && len(*effect) == 0 { return field.ErrorList{field.Required(fldPath, "")} } @@ -2345,15 +2680,15 @@ func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *fiel allErrors := field.ErrorList{} switch *effect { // TODO: Replace next line with subsequent commented-out line when implement TaintEffectNoScheduleNoAdmit. - case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoExecute: - // case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoExecute: + case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoExecute: + // case core.TaintEffectNoSchedule, core.TaintEffectPreferNoSchedule, core.TaintEffectNoScheduleNoAdmit, core.TaintEffectNoExecute: default: validValues := []string{ - string(api.TaintEffectNoSchedule), - string(api.TaintEffectPreferNoSchedule), - string(api.TaintEffectNoExecute), + string(core.TaintEffectNoSchedule), + string(core.TaintEffectPreferNoSchedule), + string(core.TaintEffectNoExecute), // TODO: Uncomment this block when implement TaintEffectNoScheduleNoAdmit. - // string(api.TaintEffectNoScheduleNoAdmit), + // string(core.TaintEffectNoScheduleNoAdmit), } allErrors = append(allErrors, field.NotSupported(fldPath, effect, validValues)) } @@ -2361,7 +2696,7 @@ func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *fiel } // validateOnlyAddedTolerations validates updated pod tolerations. 
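validateTaintEffect, converted to core types above, keeps its whitelist of the three implemented effects (TaintEffectNoScheduleNoAdmit stays commented out), and only treats an empty effect specially when the caller allows it. A standalone sketch of that check with plain strings in place of core.TaintEffect:

```
package main

import "fmt"

// taintEffectSketch mirrors validateTaintEffect: an empty effect is a
// "required" error unless allowEmpty is set, and anything outside the three
// implemented effects is unsupported.
func taintEffectSketch(effect string, allowEmpty bool) error {
	if !allowEmpty && effect == "" {
		return fmt.Errorf("effect: Required value")
	}
	switch effect {
	case "NoSchedule", "PreferNoSchedule", "NoExecute":
		return nil
	default:
		return fmt.Errorf("effect: unsupported value %q, expected one of [NoSchedule PreferNoSchedule NoExecute]", effect)
	}
}

func main() {
	fmt.Println(taintEffectSketch("NoExecute", false))         // <nil>
	fmt.Println(taintEffectSketch("NoScheduleNoAdmit", false)) // unsupported
}
```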
-func validateOnlyAddedTolerations(newTolerations []api.Toleration, oldTolerations []api.Toleration, fldPath *field.Path) field.ErrorList { +func validateOnlyAddedTolerations(newTolerations []core.Toleration, oldTolerations []core.Toleration, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, old := range oldTolerations { found := false @@ -2383,7 +2718,7 @@ func validateOnlyAddedTolerations(newTolerations []api.Toleration, oldToleration return allErrs } -func ValidateHostAliases(hostAliases []api.HostAlias, fldPath *field.Path) field.ErrorList { +func ValidateHostAliases(hostAliases []core.HostAlias, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for _, hostAlias := range hostAliases { if ip := net.ParseIP(hostAlias.IP); ip == nil { @@ -2397,7 +2732,7 @@ func ValidateHostAliases(hostAliases []api.HostAlias, fldPath *field.Path) field } // ValidateTolerations tests if given tolerations have valid data. -func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList { +func ValidateTolerations(tolerations []core.Toleration, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} for i, toleration := range tolerations { idxPath := fldPath.Index(i) @@ -2407,12 +2742,12 @@ func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) fiel } // empty toleration key with Exists operator and empty value means match all taints - if len(toleration.Key) == 0 && toleration.Operator != api.TolerationOpExists { + if len(toleration.Key) == 0 && toleration.Operator != core.TolerationOpExists { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Operator, "operator must be Exists when `key` is empty, which means \"match all values and all keys\"")) } - if toleration.TolerationSeconds != nil && toleration.Effect != api.TaintEffectNoExecute { + if toleration.TolerationSeconds != nil && toleration.Effect != core.TaintEffectNoExecute { allErrors = append(allErrors, field.Invalid(idxPath.Child("effect"), toleration.Effect, "effect must be 'NoExecute' when `tolerationSeconds` is set")) } @@ -2420,16 +2755,16 @@ func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) fiel // validate toleration operator and value switch toleration.Operator { // empty operator means Equal - case api.TolerationOpEqual, "": + case core.TolerationOpEqual, "": if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";"))) } - case api.TolerationOpExists: + case core.TolerationOpExists: if len(toleration.Value) > 0 { allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'")) } default: - validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)} + validValues := []string{string(core.TolerationOpEqual), string(core.TolerationOpExists)} allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues)) } @@ -2441,15 +2776,15 @@ func ValidateTolerations(tolerations []api.Toleration, fldPath *field.Path) fiel return allErrors } -func toResourceNames(resources api.ResourceList) []api.ResourceName { - result := []api.ResourceName{} +func toResourceNames(resources core.ResourceList) []core.ResourceName { + result := []core.ResourceName{} for resourceName := range resources { result = append(result, resourceName) } return 
result } -func toSet(resourceNames []api.ResourceName) sets.String { +func toSet(resourceNames []core.ResourceName) sets.String { result := sets.NewString() for _, resourceName := range resourceNames { result.Insert(string(resourceName)) @@ -2457,7 +2792,7 @@ func toSet(resourceNames []api.ResourceName) sets.String { return result } -func toContainerResourcesSet(ctr *api.Container) sets.String { +func toContainerResourcesSet(ctr *core.Container) sets.String { resourceNames := toResourceNames(ctr.Resources.Requests) resourceNames = append(resourceNames, toResourceNames(ctr.Resources.Limits)...) return toSet(resourceNames) @@ -2465,7 +2800,7 @@ func toContainerResourcesSet(ctr *api.Container) sets.String { // validateContainersOnlyForPod does additional validation for containers on a pod versus a pod template // it only does additive validation of fields not covered in validateContainers -func validateContainersOnlyForPod(containers []api.Container, fldPath *field.Path) field.ErrorList { +func validateContainersOnlyForPod(containers []core.Container, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ctr := range containers { idxPath := fldPath.Index(i) @@ -2477,7 +2812,7 @@ func validateContainersOnlyForPod(containers []api.Container, fldPath *field.Pat } // ValidatePod tests if required fields in the pod are set. -func ValidatePod(pod *api.Pod) field.ErrorList { +func ValidatePod(pod *core.Pod) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotations(pod.ObjectMeta.Annotations, &pod.Spec, fldPath.Child("annotations"))...) @@ -2512,19 +2847,20 @@ func ValidatePod(pod *api.Pod) field.ErrorList { // This includes checking formatting and uniqueness. It also canonicalizes the // structure by setting default values and implementing any backwards-compatibility // tricks. -func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { +func ValidatePodSpec(spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - allVolumes, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) + vols, vErrs := ValidateVolumes(spec.Volumes, fldPath.Child("volumes")) allErrs = append(allErrs, vErrs...) - allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...) - allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...) + allErrs = append(allErrs, validateContainers(spec.Containers, vols, fldPath.Child("containers"))...) + allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, vols, fldPath.Child("initContainers"))...) allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...) allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...) allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...) allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...) allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...) allErrs = append(allErrs, validateAffinity(spec.Affinity, fldPath.Child("affinity"))...) 
+ allErrs = append(allErrs, validatePodDNSConfig(spec.DNSConfig, &spec.DNSPolicy, fldPath.Child("dnsConfig"))...) if len(spec.ServiceAccountName) > 0 { for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) { allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg)) @@ -2578,19 +2914,19 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList { } // ValidateNodeSelectorRequirement tests that the specified NodeSelectorRequirement fields has valid data -func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelectorRequirement(rq core.NodeSelectorRequirement, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} switch rq.Operator { - case api.NodeSelectorOpIn, api.NodeSelectorOpNotIn: + case core.NodeSelectorOpIn, core.NodeSelectorOpNotIn: if len(rq.Values) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified when `operator` is 'In' or 'NotIn'")) } - case api.NodeSelectorOpExists, api.NodeSelectorOpDoesNotExist: + case core.NodeSelectorOpExists, core.NodeSelectorOpDoesNotExist: if len(rq.Values) > 0 { allErrs = append(allErrs, field.Forbidden(fldPath.Child("values"), "may not be specified when `operator` is 'Exists' or 'DoesNotExist'")) } - case api.NodeSelectorOpGt, api.NodeSelectorOpLt: + case core.NodeSelectorOpGt, core.NodeSelectorOpLt: if len(rq.Values) != 1 { allErrs = append(allErrs, field.Required(fldPath.Child("values"), "must be specified single value when `operator` is 'Lt' or 'Gt'")) } @@ -2602,7 +2938,7 @@ func ValidateNodeSelectorRequirement(rq api.NodeSelectorRequirement, fldPath *fi } // ValidateNodeSelectorTerm tests that the specified node selector term has valid data -func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelectorTerm(term core.NodeSelectorTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(term.MatchExpressions) == 0 { @@ -2615,7 +2951,7 @@ func ValidateNodeSelectorTerm(term api.NodeSelectorTerm, fldPath *field.Path) fi } // ValidateNodeSelector tests that the specified nodeSelector fields has valid data -func ValidateNodeSelector(nodeSelector *api.NodeSelector, fldPath *field.Path) field.ErrorList { +func ValidateNodeSelector(nodeSelector *core.NodeSelector, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} termFldPath := fldPath.Child("nodeSelectorTerms") @@ -2636,18 +2972,18 @@ func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath * v1Avoids, err := v1helper.GetAvoidPodsFromNodeAnnotations(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) return allErrs } - var avoids api.AvoidPods - if err := k8s_api_v1.Convert_v1_AvoidPods_To_api_AvoidPods(&v1Avoids, &avoids, nil); err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), api.PreferAvoidPodsAnnotationKey, err.Error())) + var avoids core.AvoidPods + if err := corev1.Convert_v1_AvoidPods_To_core_AvoidPods(&v1Avoids, &avoids, nil); err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child("AvoidPods"), core.PreferAvoidPodsAnnotationKey, err.Error())) return allErrs } if len(avoids.PreferAvoidPods) != 0 { for 
i, pa := range avoids.PreferAvoidPods { - idxPath := fldPath.Child(api.PreferAvoidPodsAnnotationKey).Index(i) + idxPath := fldPath.Child(core.PreferAvoidPodsAnnotationKey).Index(i) allErrs = append(allErrs, validatePreferAvoidPodsEntry(pa, idxPath)...) } } @@ -2656,7 +2992,7 @@ func ValidateAvoidPodsInNodeAnnotations(annotations map[string]string, fldPath * } // validatePreferAvoidPodsEntry tests if given PreferAvoidPodsEntry has valid data. -func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { +func validatePreferAvoidPodsEntry(avoidPodEntry core.PreferAvoidPodsEntry, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} if avoidPodEntry.PodSignature.PodController == nil { allErrors = append(allErrors, field.Required(fldPath.Child("PodSignature"), "")) @@ -2671,7 +3007,7 @@ func validatePreferAvoidPodsEntry(avoidPodEntry api.PreferAvoidPodsEntry, fldPat } // ValidatePreferredSchedulingTerms tests that the specified SoftNodeAffinity fields has valid data -func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { +func ValidatePreferredSchedulingTerms(terms []core.PreferredSchedulingTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, term := range terms { @@ -2685,7 +3021,7 @@ func ValidatePreferredSchedulingTerms(terms []api.PreferredSchedulingTerm, fldPa } // validatePodAffinityTerm tests that the specified podAffinityTerm fields have valid data -func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, fldPath *field.Path) field.ErrorList { +func validatePodAffinityTerm(podAffinityTerm core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...) @@ -2701,7 +3037,7 @@ func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, fldPath *field } // validatePodAffinityTerms tests that the specified podAffinityTerms fields have valid data -func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, fldPath *field.Path) field.ErrorList { +func validatePodAffinityTerms(podAffinityTerms []core.PodAffinityTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, podAffinityTerm := range podAffinityTerms { allErrs = append(allErrs, validatePodAffinityTerm(podAffinityTerm, fldPath.Index(i))...) 
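The operator rules in ValidateNodeSelectorRequirement above pair each operator with a specific shape of the Values list. Below is a minimal standalone sketch of the same pairing, using simplified local types rather than the real pkg/apis/core ones; all names in it are illustrative only.

```
package main

import "fmt"

// Simplified stand-ins for core.NodeSelectorOperator and core.NodeSelectorRequirement.
type nodeSelectorOperator string

const (
	opIn           nodeSelectorOperator = "In"
	opNotIn        nodeSelectorOperator = "NotIn"
	opExists       nodeSelectorOperator = "Exists"
	opDoesNotExist nodeSelectorOperator = "DoesNotExist"
	opGt           nodeSelectorOperator = "Gt"
	opLt           nodeSelectorOperator = "Lt"
)

type nodeSelectorRequirement struct {
	Key      string
	Operator nodeSelectorOperator
	Values   []string
}

// checkRequirement mirrors the operator/value pairing enforced by
// ValidateNodeSelectorRequirement: In/NotIn need at least one value,
// Exists/DoesNotExist forbid values, Gt/Lt take exactly one value.
func checkRequirement(rq nodeSelectorRequirement) error {
	switch rq.Operator {
	case opIn, opNotIn:
		if len(rq.Values) == 0 {
			return fmt.Errorf("%s: values must be specified when operator is In or NotIn", rq.Key)
		}
	case opExists, opDoesNotExist:
		if len(rq.Values) > 0 {
			return fmt.Errorf("%s: values may not be specified when operator is Exists or DoesNotExist", rq.Key)
		}
	case opGt, opLt:
		if len(rq.Values) != 1 {
			return fmt.Errorf("%s: exactly one value is required when operator is Gt or Lt", rq.Key)
		}
	default:
		return fmt.Errorf("%s: unsupported operator %q", rq.Key, rq.Operator)
	}
	return nil
}

func main() {
	// Accepted: Gt with a single value.
	fmt.Println(checkRequirement(nodeSelectorRequirement{Key: "cpu-count", Operator: opGt, Values: []string{"4"}}))
	// Rejected: Exists must not carry values.
	fmt.Println(checkRequirement(nodeSelectorRequirement{Key: "gpu", Operator: opExists, Values: []string{"true"}}))
}
```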
@@ -2710,7 +3046,7 @@ func validatePodAffinityTerms(podAffinityTerms []api.PodAffinityTerm, fldPath *f } // validateWeightedPodAffinityTerms tests that the specified weightedPodAffinityTerms fields have valid data -func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { +func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []core.WeightedPodAffinityTerm, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for j, weightedTerm := range weightedPodAffinityTerms { if weightedTerm.Weight <= 0 || weightedTerm.Weight > 100 { @@ -2722,7 +3058,7 @@ func validateWeightedPodAffinityTerms(weightedPodAffinityTerms []api.WeightedPod } // validatePodAntiAffinity tests that the specified podAntiAffinity fields have valid data -func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *field.Path) field.ErrorList { +func validatePodAntiAffinity(podAntiAffinity *core.PodAntiAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. // if podAntiAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { @@ -2741,7 +3077,7 @@ func validatePodAntiAffinity(podAntiAffinity *api.PodAntiAffinity, fldPath *fiel } // validatePodAffinity tests that the specified podAffinity fields have valid data -func validatePodAffinity(podAffinity *api.PodAffinity, fldPath *field.Path) field.ErrorList { +func validatePodAffinity(podAffinity *core.PodAffinity, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} // TODO:Uncomment below code once RequiredDuringSchedulingRequiredDuringExecution is implemented. // if podAffinity.RequiredDuringSchedulingRequiredDuringExecution != nil { @@ -2774,11 +3110,11 @@ func ValidateSeccompProfile(p string, fldPath *field.Path) field.ErrorList { func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if p, exists := annotations[api.SeccompPodAnnotationKey]; exists { - allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(api.SeccompPodAnnotationKey))...) + if p, exists := annotations[core.SeccompPodAnnotationKey]; exists { + allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(core.SeccompPodAnnotationKey))...) } for k, p := range annotations { - if strings.HasPrefix(k, api.SeccompContainerAnnotationKeyPrefix) { + if strings.HasPrefix(k, core.SeccompContainerAnnotationKeyPrefix) { allErrs = append(allErrs, ValidateSeccompProfile(p, fldPath.Child(k))...) 
} } @@ -2786,7 +3122,7 @@ func ValidateSeccompPodAnnotations(annotations map[string]string, fldPath *field return allErrs } -func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.PodSpec, fldPath *field.Path) field.ErrorList { +func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *core.PodSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for k, p := range annotations { if !strings.HasPrefix(k, apparmor.ContainerAnnotationKeyPrefix) { @@ -2810,7 +3146,7 @@ func ValidateAppArmorPodAnnotations(annotations map[string]string, spec *api.Pod return allErrs } -func podSpecHasContainer(spec *api.PodSpec, containerName string) bool { +func podSpecHasContainer(spec *core.PodSpec, containerName string) bool { for _, c := range spec.InitContainers { if c.Name == containerName { return true @@ -2846,7 +3182,7 @@ func IsValidSysctlName(name string) bool { return sysctlRegexp.MatchString(name) } -func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList { +func validateSysctls(sysctls []core.Sysctl, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, s := range sysctls { if len(s.Name) == 0 { @@ -2859,7 +3195,7 @@ func validateSysctls(sysctls []api.Sysctl, fldPath *field.Path) field.ErrorList } // ValidatePodSecurityContext test that the specified PodSecurityContext has valid data. -func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList { +func ValidatePodSecurityContext(securityContext *core.PodSecurityContext, spec *core.PodSpec, specPath, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if securityContext != nil { @@ -2884,7 +3220,7 @@ func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *a return allErrs } -func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { +func ValidateContainerUpdates(newContainers, oldContainers []core.Container, fldPath *field.Path) (allErrs field.ErrorList, stop bool) { allErrs = field.ErrorList{} if len(newContainers) != len(oldContainers) { //TODO: Pinpoint the specific container that causes the invalid error after we have strategic merge diff @@ -2907,7 +3243,7 @@ func ValidateContainerUpdates(newContainers, oldContainers []api.Container, fldP // ValidatePodUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. -func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { +func ValidatePodUpdate(newPod, oldPod *core.Pod) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) @@ -2952,14 +3288,14 @@ func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { // handle updateable fields by munging those fields prior to deep equal comparison. 
mungedPod := *newPod // munge spec.containers[*].image - var newContainers []api.Container + var newContainers []core.Container for ix, container := range mungedPod.Spec.Containers { container.Image = oldPod.Spec.Containers[ix].Image newContainers = append(newContainers, container) } mungedPod.Spec.Containers = newContainers // munge spec.initContainers[*].image - var newInitContainers []api.Container + var newInitContainers []core.Container for ix, container := range mungedPod.Spec.InitContainers { container.Image = oldPod.Spec.InitContainers[ix].Image newInitContainers = append(newInitContainers, container) @@ -2988,7 +3324,7 @@ func ValidatePodUpdate(newPod, oldPod *api.Pod) field.ErrorList { // ValidatePodStatusUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. -func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { +func ValidatePodStatusUpdate(newPod, oldPod *core.Pod) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&newPod.ObjectMeta, &oldPod.ObjectMeta, fldPath) allErrs = append(allErrs, ValidatePodSpecificAnnotationUpdates(newPod, oldPod, fldPath.Child("annotations"))...) @@ -3004,7 +3340,7 @@ func ValidatePodStatusUpdate(newPod, oldPod *api.Pod) field.ErrorList { } // ValidatePodBinding tests if required fields in the pod binding are legal. -func ValidatePodBinding(binding *api.Binding) field.ErrorList { +func ValidatePodBinding(binding *core.Binding) field.ErrorList { allErrs := field.ErrorList{} if len(binding.Target.Kind) != 0 && binding.Target.Kind != "Node" { @@ -3020,7 +3356,7 @@ func ValidatePodBinding(binding *api.Binding) field.ErrorList { } // ValidatePodTemplate tests if required fields in the pod template are set. -func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { +func ValidatePodTemplate(pod *core.PodTemplate) field.ErrorList { allErrs := ValidateObjectMeta(&pod.ObjectMeta, true, ValidatePodName, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePodTemplateSpec(&pod.Template, field.NewPath("template"))...) return allErrs @@ -3028,27 +3364,27 @@ func ValidatePodTemplate(pod *api.PodTemplate) field.ErrorList { // ValidatePodTemplateUpdate tests to see if the update is legal for an end user to make. newPod is updated with fields // that cannot be changed. -func ValidatePodTemplateUpdate(newPod, oldPod *api.PodTemplate) field.ErrorList { +func ValidatePodTemplateUpdate(newPod, oldPod *core.PodTemplate) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&oldPod.ObjectMeta, &newPod.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidatePodTemplateSpec(&newPod.Template, field.NewPath("template"))...) return allErrs } -var supportedSessionAffinityType = sets.NewString(string(api.ServiceAffinityClientIP), string(api.ServiceAffinityNone)) -var supportedServiceType = sets.NewString(string(api.ServiceTypeClusterIP), string(api.ServiceTypeNodePort), - string(api.ServiceTypeLoadBalancer), string(api.ServiceTypeExternalName)) +var supportedSessionAffinityType = sets.NewString(string(core.ServiceAffinityClientIP), string(core.ServiceAffinityNone)) +var supportedServiceType = sets.NewString(string(core.ServiceTypeClusterIP), string(core.ServiceTypeNodePort), + string(core.ServiceTypeLoadBalancer), string(core.ServiceTypeExternalName)) // ValidateService tests if required fields/annotations of a Service are valid. 
-func ValidateService(service *api.Service) field.ErrorList { +func ValidateService(service *core.Service) field.ErrorList { allErrs := ValidateObjectMeta(&service.ObjectMeta, true, ValidateServiceName, field.NewPath("metadata")) specPath := field.NewPath("spec") - isHeadlessService := service.Spec.ClusterIP == api.ClusterIPNone - if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != api.ServiceTypeExternalName { + isHeadlessService := service.Spec.ClusterIP == core.ClusterIPNone + if len(service.Spec.Ports) == 0 && !isHeadlessService && service.Spec.Type != core.ServiceTypeExternalName { allErrs = append(allErrs, field.Required(specPath.Child("ports"), "")) } switch service.Spec.Type { - case api.ServiceTypeLoadBalancer: + case core.ServiceTypeLoadBalancer: for ix := range service.Spec.Ports { port := &service.Spec.Ports[ix] // This is a workaround for broken cloud environments that @@ -3062,11 +3398,11 @@ func ValidateService(service *api.Service) field.ErrorList { if service.Spec.ClusterIP == "None" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for LoadBalancer services")) } - case api.ServiceTypeNodePort: + case core.ServiceTypeNodePort: if service.Spec.ClusterIP == "None" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "may not be set to 'None' for NodePort services")) } - case api.ServiceTypeExternalName: + case core.ServiceTypeExternalName: if service.Spec.ClusterIP != "" { allErrs = append(allErrs, field.Invalid(specPath.Child("clusterIP"), service.Spec.ClusterIP, "must be empty for ExternalName services")) } @@ -3094,11 +3430,11 @@ func ValidateService(service *api.Service) field.ErrorList { allErrs = append(allErrs, field.NotSupported(specPath.Child("sessionAffinity"), service.Spec.SessionAffinity, supportedSessionAffinityType.List())) } - if service.Spec.SessionAffinity == api.ServiceAffinityClientIP { + if service.Spec.SessionAffinity == core.ServiceAffinityClientIP { allErrs = append(allErrs, validateClientIPAffinityConfig(service.Spec.SessionAffinityConfig, specPath.Child("sessionAffinityConfig"))...) 
- } else if service.Spec.SessionAffinity == api.ServiceAffinityNone { + } else if service.Spec.SessionAffinity == core.ServiceAffinityNone { if service.Spec.SessionAffinityConfig != nil { - allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(api.ServiceAffinityNone)))) + allErrs = append(allErrs, field.Forbidden(specPath.Child("sessionAffinityConfig"), fmt.Sprintf("must not be set when session affinity is %s", string(core.ServiceAffinityNone)))) } } @@ -3126,7 +3462,7 @@ func ValidateService(service *api.Service) field.ErrorList { allErrs = append(allErrs, field.NotSupported(specPath.Child("type"), service.Spec.Type, supportedServiceType.List())) } - if service.Spec.Type == api.ServiceTypeLoadBalancer { + if service.Spec.Type == core.ServiceTypeLoadBalancer { portsPath := specPath.Child("ports") includeProtocols := sets.NewString() for i := range service.Spec.Ports { @@ -3142,7 +3478,7 @@ func ValidateService(service *api.Service) field.ErrorList { } } - if service.Spec.Type == api.ServiceTypeClusterIP { + if service.Spec.Type == core.ServiceTypeClusterIP { portsPath := specPath.Child("ports") for i := range service.Spec.Ports { portPath := portsPath.Index(i) @@ -3154,14 +3490,14 @@ func ValidateService(service *api.Service) field.ErrorList { // Check for duplicate NodePorts, considering (protocol,port) pairs portsPath = specPath.Child("ports") - nodePorts := make(map[api.ServicePort]bool) + nodePorts := make(map[core.ServicePort]bool) for i := range service.Spec.Ports { port := &service.Spec.Ports[i] if port.NodePort == 0 { continue } portPath := portsPath.Index(i) - var key api.ServicePort + var key core.ServicePort key.Protocol = port.Protocol key.NodePort = port.NodePort _, found := nodePorts[key] @@ -3173,10 +3509,10 @@ func ValidateService(service *api.Service) field.ErrorList { // Check for duplicate Ports, considering (protocol,port) pairs portsPath = specPath.Child("ports") - ports := make(map[api.ServicePort]bool) + ports := make(map[core.ServicePort]bool) for i, port := range service.Spec.Ports { portPath := portsPath.Index(i) - key := api.ServicePort{Protocol: port.Protocol, Port: port.Port} + key := core.ServicePort{Protocol: port.Protocol, Port: port.Port} _, found := ports[key] if found { allErrs = append(allErrs, field.Duplicate(portPath, key)) @@ -3184,24 +3520,8 @@ func ValidateService(service *api.Service) field.ErrorList { ports[key] = true } - // Check for duplicate TargetPort - portsPath = specPath.Child("ports") - targetPorts := make(map[api.ServicePort]bool) - for i, port := range service.Spec.Ports { - if (port.TargetPort.Type == intstr.Int && port.TargetPort.IntVal == 0) || (port.TargetPort.Type == intstr.String && port.TargetPort.StrVal == "") { - continue - } - portPath := portsPath.Index(i) - key := api.ServicePort{Protocol: port.Protocol, TargetPort: port.TargetPort} - _, found := targetPorts[key] - if found { - allErrs = append(allErrs, field.Duplicate(portPath.Child("targetPort"), port.TargetPort)) - } - targetPorts[key] = true - } - // Validate SourceRange field and annotation - _, ok := service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] + _, ok := service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] if len(service.Spec.LoadBalancerSourceRanges) > 0 || ok { var fieldPath *field.Path var val string @@ -3209,10 +3529,10 @@ func ValidateService(service *api.Service) field.ErrorList { fieldPath = specPath.Child("LoadBalancerSourceRanges") val 
= fmt.Sprintf("%v", service.Spec.LoadBalancerSourceRanges) } else { - fieldPath = field.NewPath("metadata", "annotations").Key(api.AnnotationLoadBalancerSourceRangesKey) - val = service.Annotations[api.AnnotationLoadBalancerSourceRangesKey] + fieldPath = field.NewPath("metadata", "annotations").Key(core.AnnotationLoadBalancerSourceRangesKey) + val = service.Annotations[core.AnnotationLoadBalancerSourceRangesKey] } - if service.Spec.Type != api.ServiceTypeLoadBalancer { + if service.Spec.Type != core.ServiceTypeLoadBalancer { allErrs = append(allErrs, field.Invalid(fieldPath, "", "may only be used when `type` is 'LoadBalancer'")) } _, err := apiservice.GetLoadBalancerSourceRanges(service) @@ -3226,7 +3546,7 @@ func ValidateService(service *api.Service) field.ErrorList { return allErrs } -func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { +func validateServicePort(sp *core.ServicePort, requireName, isHeadlessService bool, allNames *sets.String, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if requireName && len(sp.Name) == 0 { @@ -3266,15 +3586,15 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo // validateServiceExternalTrafficFieldsValue validates ExternalTraffic related annotations // have legal value. -func validateServiceExternalTrafficFieldsValue(service *api.Service) field.ErrorList { +func validateServiceExternalTrafficFieldsValue(service *core.Service) field.ErrorList { allErrs := field.ErrorList{} // Check first class fields. if service.Spec.ExternalTrafficPolicy != "" && - service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeCluster && - service.Spec.ExternalTrafficPolicy != api.ServiceExternalTrafficPolicyTypeLocal { + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeCluster && + service.Spec.ExternalTrafficPolicy != core.ServiceExternalTrafficPolicyTypeLocal { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, - fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", api.ServiceExternalTrafficPolicyTypeCluster, api.ServiceExternalTrafficPolicyTypeLocal))) + fmt.Sprintf("ExternalTrafficPolicy must be empty, %v or %v", core.ServiceExternalTrafficPolicyTypeCluster, core.ServiceExternalTrafficPolicyTypeLocal))) } if service.Spec.HealthCheckNodePort < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("spec").Child("healthCheckNodePort"), service.Spec.HealthCheckNodePort, @@ -3288,11 +3608,11 @@ func validateServiceExternalTrafficFieldsValue(service *api.Service) field.Error // HealthCheckNodePort and Type combination are legal. For update, it should be called // after clearing externalTraffic related fields for the ease of transitioning between // different service types. 
-func ValidateServiceExternalTrafficFieldsCombination(service *api.Service) field.ErrorList { +func ValidateServiceExternalTrafficFieldsCombination(service *core.Service) field.ErrorList { allErrs := field.ErrorList{} - if service.Spec.Type != api.ServiceTypeLoadBalancer && - service.Spec.Type != api.ServiceTypeNodePort && + if service.Spec.Type != core.ServiceTypeLoadBalancer && + service.Spec.Type != core.ServiceTypeNodePort && service.Spec.ExternalTrafficPolicy != "" { allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "externalTrafficPolicy"), service.Spec.ExternalTrafficPolicy, "ExternalTrafficPolicy can only be set on NodePort and LoadBalancer service")) @@ -3308,13 +3628,13 @@ func ValidateServiceExternalTrafficFieldsCombination(service *api.Service) field } // ValidateServiceUpdate tests if required fields in the service are set during an update -func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { +func ValidateServiceUpdate(service, oldService *core.Service) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) // ClusterIP should be immutable for services using it (every type other than ExternalName) // which do not have ClusterIP assigned yet (empty string value) - if service.Spec.Type != api.ServiceTypeExternalName { - if oldService.Spec.Type != api.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { + if service.Spec.Type != core.ServiceTypeExternalName { + if oldService.Spec.Type != core.ServiceTypeExternalName && oldService.Spec.ClusterIP != "" { allErrs = append(allErrs, ValidateImmutableField(service.Spec.ClusterIP, oldService.Spec.ClusterIP, field.NewPath("spec", "clusterIP"))...) } } @@ -3324,34 +3644,34 @@ func ValidateServiceUpdate(service, oldService *api.Service) field.ErrorList { } // ValidateServiceStatusUpdate tests if required fields in the Service are set when updating status. -func ValidateServiceStatusUpdate(service, oldService *api.Service) field.ErrorList { +func ValidateServiceStatusUpdate(service, oldService *core.Service) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&service.ObjectMeta, &oldService.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateLoadBalancerStatus(&service.Status.LoadBalancer, field.NewPath("status", "loadBalancer"))...) return allErrs } // ValidateReplicationController tests if required fields in the replication controller are set. -func ValidateReplicationController(controller *api.ReplicationController) field.ErrorList { +func ValidateReplicationController(controller *core.ReplicationController) field.ErrorList { allErrs := ValidateObjectMeta(&controller.ObjectMeta, true, ValidateReplicationControllerName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) return allErrs } // ValidateReplicationControllerUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerUpdate(controller, oldController *api.ReplicationController) field.ErrorList { +func ValidateReplicationControllerUpdate(controller, oldController *core.ReplicationController) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateReplicationControllerSpec(&controller.Spec, field.NewPath("spec"))...) 
return allErrs } // ValidateReplicationControllerStatusUpdate tests if required fields in the replication controller are set. -func ValidateReplicationControllerStatusUpdate(controller, oldController *api.ReplicationController) field.ErrorList { +func ValidateReplicationControllerStatusUpdate(controller, oldController *core.ReplicationController) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&controller.ObjectMeta, &oldController.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateReplicationControllerStatus(controller.Status, field.NewPath("status"))...) return allErrs } -func ValidateReplicationControllerStatus(status api.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { +func ValidateReplicationControllerStatus(status core.ReplicationControllerStatus, statusPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateNonnegativeField(int64(status.Replicas), statusPath.Child("replicas"))...) allErrs = append(allErrs, ValidateNonnegativeField(int64(status.FullyLabeledReplicas), statusPath.Child("fullyLabeledReplicas"))...) @@ -3385,7 +3705,7 @@ func ValidateNonEmptySelector(selectorMap map[string]string, fldPath *field.Path } // Validates the given template and ensures that it is in accordance with the desired selector and replicas. -func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpecForRC(template *core.PodTemplateSpec, selectorMap map[string]string, replicas int32, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if template == nil { allErrs = append(allErrs, field.Required(fldPath, "")) @@ -3403,8 +3723,8 @@ func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map allErrs = append(allErrs, ValidateReadOnlyPersistentDisks(template.Spec.Volumes, fldPath.Child("spec", "volumes"))...) } // RestartPolicy has already been first-order validated as per ValidatePodTemplateSpec(). - if template.Spec.RestartPolicy != api.RestartPolicyAlways { - allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(api.RestartPolicyAlways)})) + if template.Spec.RestartPolicy != core.RestartPolicyAlways { + allErrs = append(allErrs, field.NotSupported(fldPath.Child("spec", "restartPolicy"), template.Spec.RestartPolicy, []string{string(core.RestartPolicyAlways)})) } if template.Spec.ActiveDeadlineSeconds != nil { allErrs = append(allErrs, field.Invalid(fldPath.Child("spec", "activeDeadlineSeconds"), template.Spec.ActiveDeadlineSeconds, "must not be specified")) @@ -3414,7 +3734,7 @@ func ValidatePodTemplateSpecForRC(template *api.PodTemplateSpec, selectorMap map } // ValidateReplicationControllerSpec tests if required fields in the replication controller spec are set. -func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { +func ValidateReplicationControllerSpec(spec *core.ReplicationControllerSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateNonnegativeField(int64(spec.MinReadySeconds), fldPath.Child("minReadySeconds"))...) allErrs = append(allErrs, ValidateNonEmptySelector(spec.Selector, fldPath.Child("selector"))...) 
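The ClusterIP handling in ValidateServiceUpdate above only freezes the field once an address has actually been assigned, and ExternalName services are exempt on either side of the update. Below is a small sketch of that decision with simplified local types; the names are illustrative, not the real core.Service.

```
package main

import "fmt"

const serviceTypeExternalName = "ExternalName"

// serviceSpec is a tiny stand-in for the Type and ClusterIP fields the rule looks at.
type serviceSpec struct {
	Type      string
	ClusterIP string
}

// clusterIPIsImmutable mirrors the condition in ValidateServiceUpdate: the field
// is frozen only when neither the old nor the new service is ExternalName and an
// address was already assigned on the old object.
func clusterIPIsImmutable(newSpec, oldSpec serviceSpec) bool {
	if newSpec.Type == serviceTypeExternalName {
		return false
	}
	return oldSpec.Type != serviceTypeExternalName && oldSpec.ClusterIP != ""
}

func main() {
	assigned := serviceSpec{Type: "ClusterIP", ClusterIP: "10.0.0.10"}
	update := serviceSpec{Type: "NodePort", ClusterIP: "10.0.0.99"}

	// true: an address was already allocated, so changing it must be rejected.
	fmt.Println(clusterIPIsImmutable(update, assigned))

	// false: no address allocated yet, so the update may still set one.
	fmt.Println(clusterIPIsImmutable(update, serviceSpec{Type: "ClusterIP"}))
}
```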
@@ -3424,7 +3744,7 @@ func ValidateReplicationControllerSpec(spec *api.ReplicationControllerSpec, fldP } // ValidatePodTemplateSpec validates the spec of a pod template -func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) field.ErrorList { +func ValidatePodTemplateSpec(spec *core.PodTemplateSpec, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.Labels, fldPath.Child("labels"))...) allErrs = append(allErrs, ValidateAnnotations(spec.Annotations, fldPath.Child("annotations"))...) @@ -3433,7 +3753,7 @@ func ValidatePodTemplateSpec(spec *api.PodTemplateSpec, fldPath *field.Path) fie return allErrs } -func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path) field.ErrorList { +func ValidateReadOnlyPersistentDisks(volumes []core.Volume, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i := range volumes { vol := &volumes[i] @@ -3454,22 +3774,22 @@ func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *fie taints, err := helper.GetTaintsFromNodeAnnotations(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath, core.TaintsAnnotationKey, err.Error())) return allErrs } if len(taints) > 0 { - allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...) + allErrs = append(allErrs, validateNodeTaints(taints, fldPath.Child(core.TaintsAnnotationKey))...) } return allErrs } // validateNodeTaints tests if given taints have valid data. -func validateNodeTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList { +func validateNodeTaints(taints []core.Taint, fldPath *field.Path) field.ErrorList { allErrors := field.ErrorList{} - uniqueTaints := map[api.TaintEffect]sets.String{} + uniqueTaints := map[core.TaintEffect]sets.String{} for i, currTaint := range taints { idxPath := fldPath.Index(i) @@ -3502,18 +3822,18 @@ func validateNodeTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if annotations[api.TaintsAnnotationKey] != "" { + if annotations[core.TaintsAnnotationKey] != "" { allErrs = append(allErrs, ValidateTaintsInNodeAnnotations(annotations, fldPath)...) } - if annotations[api.PreferAvoidPodsAnnotationKey] != "" { + if annotations[core.PreferAvoidPodsAnnotationKey] != "" { allErrs = append(allErrs, ValidateAvoidPodsInNodeAnnotations(annotations, fldPath)...) } return allErrs } // ValidateNode tests if required fields in the node are set. -func ValidateNode(node *api.Node) field.ErrorList { +func ValidateNode(node *core.Node) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath) allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) @@ -3536,12 +3856,17 @@ func ValidateNode(node *api.Node) field.ErrorList { allErrs = append(allErrs, field.Forbidden(field.NewPath("spec", "configSource"), "configSource may only be set if the DynamicKubeletConfig feature gate is enabled)")) } - // TODO(rjnagal): Ignore PodCIDR till its completely implemented. 
+ if len(node.Spec.PodCIDR) != 0 { + _, err := ValidateCIDR(node.Spec.PodCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(field.NewPath("spec", "podCIDR"), node.Spec.PodCIDR, "not a valid CIDR")) + } + } return allErrs } // ValidateNodeResources is used to make sure a node has valid capacity and allocatable values. -func ValidateNodeResources(node *api.Node) field.ErrorList { +func ValidateNodeResources(node *core.Node) field.ErrorList { allErrs := field.ErrorList{} // Validate resource quantities in capacity. hugePageSizes := sets.NewString() @@ -3573,21 +3898,21 @@ func ValidateNodeResources(node *api.Node) field.ErrorList { } // ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode. -func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { +func ValidateNodeUpdate(node, oldNode *core.Node) field.ErrorList { fldPath := field.NewPath("metadata") allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath) allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...) - // TODO: Enable the code once we have better api object.status update model. Currently, + // TODO: Enable the code once we have better core object.status update model. Currently, // anyone can update node status. - // if !apiequality.Semantic.DeepEqual(node.Status, api.NodeStatus{}) { + // if !apiequality.Semantic.DeepEqual(node.Status, core.NodeStatus{}) { // allErrs = append(allErrs, field.Invalid("status", node.Status, "must be empty")) // } allErrs = append(allErrs, ValidateNodeResources(node)...) // Validate no duplicate addresses in node status. - addresses := make(map[api.NodeAddress]bool) + addresses := make(map[core.NodeAddress]bool) for i, address := range node.Status.Addresses { if _, ok := addresses[address]; ok { allErrs = append(allErrs, field.Duplicate(field.NewPath("status", "addresses").Index(i), address)) @@ -3647,12 +3972,6 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList { // Validate compute resource typename. // Refer to docs/design/resources.md for more details. func validateResourceName(value string, fldPath *field.Path) field.ErrorList { - // Opaque integer resources (OIR) deprecation began in v1.8 - // TODO: Remove warning after OIR deprecation cycle. - if helper.IsOpaqueIntResourceName(api.ResourceName(value)) { - glog.Errorf("DEPRECATION WARNING! 
Opaque integer resources are deprecated starting with v1.8: %s", value) - } - allErrs := field.ErrorList{} for _, msg := range validation.IsQualifiedName(value) { allErrs = append(allErrs, field.Invalid(fldPath, value, msg)) @@ -3685,8 +4004,8 @@ func validateContainerResourceName(value string, fldPath *field.Path) field.Erro // isLocalStorageResource checks whether the resource is local ephemeral storage func isLocalStorageResource(name string) bool { - if name == string(api.ResourceEphemeralStorage) || name == string(api.ResourceRequestsEphemeralStorage) || - name == string(api.ResourceLimitsEphemeralStorage) { + if name == string(core.ResourceEphemeralStorage) || name == string(core.ResourceRequestsEphemeralStorage) || + name == string(core.ResourceLimitsEphemeralStorage) { return true } else { return false @@ -3729,13 +4048,13 @@ func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorLi // Validate limit range resource name // limit types (other than Pod/Container) could contain storage not just cpu or memory -func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPath *field.Path) field.ErrorList { +func validateLimitRangeResourceName(limitType core.LimitType, value string, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - if value == string(api.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + if value == string(core.ResourceEphemeralStorage) && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { return append(allErrs, field.Forbidden(fldPath, "ResourceEphemeralStorage field disabled by feature-gate for Resource LimitRange")) } switch limitType { - case api.LimitTypePod, api.LimitTypeContainer: + case core.LimitTypePod, core.LimitTypeContainer: return validateContainerResourceName(value, fldPath) default: return validateResourceName(value, fldPath) @@ -3743,11 +4062,11 @@ func validateLimitRangeResourceName(limitType api.LimitType, value string, fldPa } // ValidateLimitRange tests if required fields in the LimitRange are set. 
-func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { +func ValidateLimitRange(limitRange *core.LimitRange) field.ErrorList { allErrs := ValidateObjectMeta(&limitRange.ObjectMeta, true, ValidateLimitRangeName, field.NewPath("metadata")) // ensure resource names are properly qualified per docs/design/resources.md - limitTypeSet := map[api.LimitType]bool{} + limitTypeSet := map[core.LimitType]bool{} fldPath := field.NewPath("spec", "limits") for i := range limitRange.Spec.Limits { idxPath := fldPath.Index(i) @@ -3778,7 +4097,7 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { min[string(k)] = q } - if limit.Type == api.LimitTypePod { + if limit.Type == core.LimitTypePod { if len(limit.Default) > 0 { allErrs = append(allErrs, field.Forbidden(idxPath.Child("default"), "may not be specified when `type` is 'Pod'")) } @@ -3798,9 +4117,9 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { } } - if limit.Type == api.LimitTypePersistentVolumeClaim { - _, minQuantityFound := limit.Min[api.ResourceStorage] - _, maxQuantityFound := limit.Max[api.ResourceStorage] + if limit.Type == core.LimitTypePersistentVolumeClaim { + _, minQuantityFound := limit.Min[core.ResourceStorage] + _, maxQuantityFound := limit.Max[core.ResourceStorage] if !minQuantityFound && !maxQuantityFound { allErrs = append(allErrs, field.Required(idxPath.Child("limits"), "either minimum or maximum storage value is required, but neither was provided")) } @@ -3859,6 +4178,11 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { allErrs = append(allErrs, field.Invalid(idxPath.Child("maxLimitRequestRatio").Key(string(k)), maxRatio, fmt.Sprintf("ratio %s is greater than max/min = %f", maxRatio.String(), maxRatioLimit))) } } + + // for GPU and hugepages, the default value and defaultRequest value must match if both are specified + if !helper.IsOvercommitAllowed(core.ResourceName(k)) && defaultQuantityFound && defaultRequestQuantityFound && defaultQuantity.Cmp(defaultRequestQuantity) != 0 { + allErrs = append(allErrs, field.Invalid(idxPath.Child("defaultRequest").Key(string(k)), defaultRequestQuantity, fmt.Sprintf("default value %s must equal to defaultRequest value %s in %s", defaultQuantity.String(), defaultRequestQuantity.String(), k))) + } } } @@ -3866,20 +4190,20 @@ func ValidateLimitRange(limitRange *api.LimitRange) field.ErrorList { } // ValidateServiceAccount tests if required fields in the ServiceAccount are set. -func ValidateServiceAccount(serviceAccount *api.ServiceAccount) field.ErrorList { +func ValidateServiceAccount(serviceAccount *core.ServiceAccount) field.ErrorList { allErrs := ValidateObjectMeta(&serviceAccount.ObjectMeta, true, ValidateServiceAccountName, field.NewPath("metadata")) return allErrs } // ValidateServiceAccountUpdate tests if required fields in the ServiceAccount are set. -func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *api.ServiceAccount) field.ErrorList { +func ValidateServiceAccountUpdate(newServiceAccount, oldServiceAccount *core.ServiceAccount) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newServiceAccount.ObjectMeta, &oldServiceAccount.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateServiceAccount(newServiceAccount)...) return allErrs } // ValidateSecret tests if required fields in the Secret are set. 
-func ValidateSecret(secret *api.Secret) field.ErrorList { +func ValidateSecret(secret *core.Secret) field.ErrorList { allErrs := ValidateObjectMeta(&secret.ObjectMeta, true, ValidateSecretName, field.NewPath("metadata")) dataPath := field.NewPath("data") @@ -3890,63 +4214,63 @@ func ValidateSecret(secret *api.Secret) field.ErrorList { } totalSize += len(value) } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(dataPath, "", api.MaxSecretSize)) + if totalSize > core.MaxSecretSize { + allErrs = append(allErrs, field.TooLong(dataPath, "", core.MaxSecretSize)) } switch secret.Type { - case api.SecretTypeServiceAccountToken: + case core.SecretTypeServiceAccountToken: // Only require Annotations[kubernetes.io/service-account.name] // Additional fields (like Annotations[kubernetes.io/service-account.uid] and Data[token]) might be contributed later by a controller loop - if value := secret.Annotations[api.ServiceAccountNameKey]; len(value) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(api.ServiceAccountNameKey), "")) + if value := secret.Annotations[core.ServiceAccountNameKey]; len(value) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("metadata", "annotations").Key(core.ServiceAccountNameKey), "")) } - case api.SecretTypeOpaque, "": + case core.SecretTypeOpaque, "": // no-op - case api.SecretTypeDockercfg: - dockercfgBytes, exists := secret.Data[api.DockerConfigKey] + case core.SecretTypeDockercfg: + dockercfgBytes, exists := secret.Data[core.DockerConfigKey] if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigKey), "")) + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigKey), "")) break } // make sure that the content is well-formed json. if err := json.Unmarshal(dockercfgBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigKey), "", err.Error())) + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigKey), "", err.Error())) } - case api.SecretTypeDockerConfigJson: - dockerConfigJsonBytes, exists := secret.Data[api.DockerConfigJsonKey] + case core.SecretTypeDockerConfigJson: + dockerConfigJsonBytes, exists := secret.Data[core.DockerConfigJsonKey] if !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.DockerConfigJsonKey), "")) + allErrs = append(allErrs, field.Required(dataPath.Key(core.DockerConfigJsonKey), "")) break } // make sure that the content is well-formed json. 
if err := json.Unmarshal(dockerConfigJsonBytes, &map[string]interface{}{}); err != nil { - allErrs = append(allErrs, field.Invalid(dataPath.Key(api.DockerConfigJsonKey), "", err.Error())) + allErrs = append(allErrs, field.Invalid(dataPath.Key(core.DockerConfigJsonKey), "", err.Error())) } - case api.SecretTypeBasicAuth: - _, usernameFieldExists := secret.Data[api.BasicAuthUsernameKey] - _, passwordFieldExists := secret.Data[api.BasicAuthPasswordKey] + case core.SecretTypeBasicAuth: + _, usernameFieldExists := secret.Data[core.BasicAuthUsernameKey] + _, passwordFieldExists := secret.Data[core.BasicAuthPasswordKey] // username or password might be empty, but the field must be present if !usernameFieldExists && !passwordFieldExists { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthUsernameKey), "")) - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.BasicAuthPasswordKey), "")) + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthUsernameKey), "")) + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.BasicAuthPasswordKey), "")) break } - case api.SecretTypeSSHAuth: - if len(secret.Data[api.SSHAuthPrivateKey]) == 0 { - allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(api.SSHAuthPrivateKey), "")) + case core.SecretTypeSSHAuth: + if len(secret.Data[core.SSHAuthPrivateKey]) == 0 { + allErrs = append(allErrs, field.Required(field.NewPath("data[%s]").Key(core.SSHAuthPrivateKey), "")) break } - case api.SecretTypeTLS: - if _, exists := secret.Data[api.TLSCertKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSCertKey), "")) + case core.SecretTypeTLS: + if _, exists := secret.Data[core.TLSCertKey]; !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSCertKey), "")) } - if _, exists := secret.Data[api.TLSPrivateKeyKey]; !exists { - allErrs = append(allErrs, field.Required(dataPath.Key(api.TLSPrivateKeyKey), "")) + if _, exists := secret.Data[core.TLSPrivateKeyKey]; !exists { + allErrs = append(allErrs, field.Required(dataPath.Key(core.TLSPrivateKeyKey), "")) } // TODO: Verify that the key matches the cert. default: @@ -3957,7 +4281,7 @@ func ValidateSecret(secret *api.Secret) field.ErrorList { } // ValidateSecretUpdate tests if required fields in the Secret are set. -func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { +func ValidateSecretUpdate(newSecret, oldSecret *core.Secret) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newSecret.ObjectMeta, &oldSecret.ObjectMeta, field.NewPath("metadata")) if len(newSecret.Type) == 0 { @@ -3976,7 +4300,7 @@ func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList { var ValidateConfigMapName = NameIsDNSSubdomain // ValidateConfigMap tests whether required fields in the ConfigMap are set. -func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { +func ValidateConfigMap(cfg *core.ConfigMap) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateObjectMeta(&cfg.ObjectMeta, true, ValidateConfigMapName, field.NewPath("metadata"))...) 
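ValidateSecret above accepts kubernetes.io/dockercfg and kubernetes.io/dockerconfigjson payloads only if they unmarshal as a JSON object. Below is a minimal sketch of the same well-formedness check, assuming nothing beyond the standard library; the sample registry entry is made up.

```
package main

import (
	"encoding/json"
	"fmt"
)

// wellFormedJSONObject mirrors the check ValidateSecret applies to the
// .dockercfg and .dockerconfigjson payloads: the bytes must unmarshal into a
// JSON object, otherwise the secret is rejected as invalid.
func wellFormedJSONObject(data []byte) error {
	return json.Unmarshal(data, &map[string]interface{}{})
}

func main() {
	good := []byte(`{"https://index.docker.io/v1/": {"auth": "bXktYXV0aA=="}}`)
	bad := []byte(`not-json`)

	fmt.Println(wellFormedJSONObject(good)) // <nil>
	fmt.Println(wellFormedJSONObject(bad))  // a JSON syntax error
}
```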
@@ -3988,15 +4312,15 @@ func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList { } totalSize += len(value) } - if totalSize > api.MaxSecretSize { - allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", api.MaxSecretSize)) + if totalSize > core.MaxSecretSize { + allErrs = append(allErrs, field.TooLong(field.NewPath("data"), "", core.MaxSecretSize)) } return allErrs } // ValidateConfigMapUpdate tests if required fields in the ConfigMap are set. -func ValidateConfigMapUpdate(newCfg, oldCfg *api.ConfigMap) field.ErrorList { +func ValidateConfigMapUpdate(newCfg, oldCfg *core.ConfigMap) field.ErrorList { allErrs := field.ErrorList{} allErrs = append(allErrs, ValidateObjectMetaUpdate(&newCfg.ObjectMeta, &oldCfg.ObjectMeta, field.NewPath("metadata"))...) allErrs = append(allErrs, ValidateConfigMap(newCfg)...) @@ -4012,7 +4336,7 @@ func validateBasicResource(quantity resource.Quantity, fldPath *field.Path) fiel } // Validates resource requirement spec. -func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPath *field.Path) field.ErrorList { +func ValidateResourceRequirements(requirements *core.ResourceRequirements, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} limPath := fldPath.Child("limits") reqPath := fldPath.Child("requests") @@ -4024,7 +4348,7 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat // Validate resource quantity. allErrs = append(allErrs, ValidateResourceQuantityValue(string(resourceName), quantity, fldPath)...) - if resourceName == api.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { + if resourceName == core.ResourceEphemeralStorage && !utilfeature.DefaultFeatureGate.Enabled(features.LocalStorageCapacityIsolation) { allErrs = append(allErrs, field.Forbidden(limPath, "ResourceEphemeralStorage field disabled by feature-gate for ResourceRequirements")) } if helper.IsHugePageResourceName(resourceName) && !utilfeature.DefaultFeatureGate.Enabled(features.HugePages) { @@ -4044,12 +4368,12 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat if exists { // For GPUs, not only requests can't exceed limits, they also can't be lower, i.e. must be equal. 
if quantity.Cmp(limitQuantity) != 0 && !helper.IsOvercommitAllowed(resourceName) { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", api.ResourceNvidiaGPU))) + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s limit", resourceName))) } else if quantity.Cmp(limitQuantity) > 0 { allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be less than or equal to %s limit", resourceName))) } - } else if resourceName == api.ResourceNvidiaGPU { - allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", api.ResourceNvidiaGPU))) + } else if resourceName == core.ResourceNvidiaGPU { + allErrs = append(allErrs, field.Invalid(reqPath, quantity.String(), fmt.Sprintf("must be equal to %s request", core.ResourceNvidiaGPU))) } } @@ -4057,7 +4381,7 @@ func ValidateResourceRequirements(requirements *api.ResourceRequirements, fldPat } // validateResourceQuotaScopes ensures that each enumerated hard resource constraint is valid for set of scopes -func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { +func validateResourceQuotaScopes(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} if len(resourceQuotaSpec.Scopes) == 0 { return allErrs @@ -4080,8 +4404,8 @@ func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld * scopeSet.Insert(string(scope)) } invalidScopePairs := []sets.String{ - sets.NewString(string(api.ResourceQuotaScopeBestEffort), string(api.ResourceQuotaScopeNotBestEffort)), - sets.NewString(string(api.ResourceQuotaScopeTerminating), string(api.ResourceQuotaScopeNotTerminating)), + sets.NewString(string(core.ResourceQuotaScopeBestEffort), string(core.ResourceQuotaScopeNotBestEffort)), + sets.NewString(string(core.ResourceQuotaScopeTerminating), string(core.ResourceQuotaScopeNotTerminating)), } for _, invalidScopePair := range invalidScopePairs { if scopeSet.HasAll(invalidScopePair.List()...) { @@ -4092,7 +4416,7 @@ func validateResourceQuotaScopes(resourceQuotaSpec *api.ResourceQuotaSpec, fld * } // ValidateResourceQuota tests if required fields in the ResourceQuota are set. -func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { +func ValidateResourceQuota(resourceQuota *core.ResourceQuota) field.ErrorList { allErrs := ValidateObjectMeta(&resourceQuota.ObjectMeta, true, ValidateResourceQuotaName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateResourceQuotaSpec(&resourceQuota.Spec, field.NewPath("spec"))...) 
@@ -4101,7 +4425,7 @@ func ValidateResourceQuota(resourceQuota *api.ResourceQuota) field.ErrorList { return allErrs } -func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Path) field.ErrorList { +func ValidateResourceQuotaStatus(status *core.ResourceQuotaStatus, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} fldPath := fld.Child("hard") @@ -4120,7 +4444,7 @@ func ValidateResourceQuotaStatus(status *api.ResourceQuotaStatus, fld *field.Pat return allErrs } -func ValidateResourceQuotaSpec(resourceQuotaSpec *api.ResourceQuotaSpec, fld *field.Path) field.ErrorList { +func ValidateResourceQuotaSpec(resourceQuotaSpec *core.ResourceQuotaSpec, fld *field.Path) field.ErrorList { allErrs := field.ErrorList{} fldPath := fld.Child("hard") @@ -4148,7 +4472,7 @@ func ValidateResourceQuantityValue(resource string, value resource.Quantity, fld // ValidateResourceQuotaUpdate tests to see if the update is legal for an end user to make. // newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { +func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, ValidateResourceQuotaSpec(&newResourceQuota.Spec, field.NewPath("spec"))...) @@ -4172,7 +4496,7 @@ func ValidateResourceQuotaUpdate(newResourceQuota, oldResourceQuota *api.Resourc // ValidateResourceQuotaStatusUpdate tests to see if the status update is legal for an end user to make. // newResourceQuota is updated with fields that cannot be changed. -func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.ResourceQuota) field.ErrorList { +func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *core.ResourceQuota) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newResourceQuota.ObjectMeta, &oldResourceQuota.ObjectMeta, field.NewPath("metadata")) if len(newResourceQuota.ResourceVersion) == 0 { allErrs = append(allErrs, field.Required(field.NewPath("resourceVersion"), "")) @@ -4194,7 +4518,7 @@ func ValidateResourceQuotaStatusUpdate(newResourceQuota, oldResourceQuota *api.R } // ValidateNamespace tests if required fields are set. -func ValidateNamespace(namespace *api.Namespace) field.ErrorList { +func ValidateNamespace(namespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMeta(&namespace.ObjectMeta, false, ValidateNamespaceName, field.NewPath("metadata")) for i := range namespace.Spec.Finalizers { allErrs = append(allErrs, validateFinalizerName(string(namespace.Spec.Finalizers[i]), field.NewPath("spec", "finalizers"))...) @@ -4204,7 +4528,7 @@ func ValidateNamespace(namespace *api.Namespace) field.ErrorList { // Validate finalizer names func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList { - allErrs := genericvalidation.ValidateFinalizerName(stringValue, fldPath) + allErrs := apimachineryvalidation.ValidateFinalizerName(stringValue, fldPath) for _, err := range validateKubeFinalizerName(stringValue, fldPath) { allErrs = append(allErrs, err) } @@ -4226,7 +4550,7 @@ func validateKubeFinalizerName(stringValue string, fldPath *field.Path) field.Er // ValidateNamespaceUpdate tests to make sure a namespace update can be applied. 
// newNamespace is updated with fields that cannot be changed -func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Namespace) field.ErrorList { +func ValidateNamespaceUpdate(newNamespace *core.Namespace, oldNamespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) newNamespace.Spec.Finalizers = oldNamespace.Spec.Finalizers newNamespace.Status = oldNamespace.Status @@ -4235,15 +4559,15 @@ func ValidateNamespaceUpdate(newNamespace *api.Namespace, oldNamespace *api.Name // ValidateNamespaceStatusUpdate tests to see if the update is legal for an end user to make. newNamespace is updated with fields // that cannot be changed. -func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { +func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) newNamespace.Spec = oldNamespace.Spec if newNamespace.DeletionTimestamp.IsZero() { - if newNamespace.Status.Phase != api.NamespaceActive { + if newNamespace.Status.Phase != core.NamespaceActive { allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Active' if `deletionTimestamp` is empty")) } } else { - if newNamespace.Status.Phase != api.NamespaceTerminating { + if newNamespace.Status.Phase != core.NamespaceTerminating { allErrs = append(allErrs, field.Invalid(field.NewPath("status", "Phase"), newNamespace.Status.Phase, "may only be 'Terminating' if `deletionTimestamp` is not empty")) } } @@ -4252,7 +4576,7 @@ func ValidateNamespaceStatusUpdate(newNamespace, oldNamespace *api.Namespace) fi // ValidateNamespaceFinalizeUpdate tests to see if the update is legal for an end user to make. // newNamespace is updated with fields that cannot be changed. -func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) field.ErrorList { +func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *core.Namespace) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newNamespace.ObjectMeta, &oldNamespace.ObjectMeta, field.NewPath("metadata")) fldPath := field.NewPath("spec", "finalizers") @@ -4265,7 +4589,7 @@ func ValidateNamespaceFinalizeUpdate(newNamespace, oldNamespace *api.Namespace) } // Construct lookup map of old subset IPs to NodeNames. 
-func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.EndpointAddress) { +func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []core.EndpointAddress) { for n := range addresses { if addresses[n].NodeName == nil { continue @@ -4275,7 +4599,7 @@ func updateEpAddrToNodeNameMap(ipToNodeName map[string]string, addresses []api.E } // Build a map across all subsets of IP -> NodeName -func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]string { +func buildEndpointAddressNodeNameMap(subsets []core.EndpointSubset) map[string]string { ipToNodeName := make(map[string]string) for i := range subsets { updateEpAddrToNodeNameMap(ipToNodeName, subsets[i].Addresses) @@ -4284,7 +4608,7 @@ func buildEndpointAddressNodeNameMap(subsets []api.EndpointSubset) map[string]st return ipToNodeName } -func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { +func validateEpAddrNodeNameTransition(addr *core.EndpointAddress, ipToNodeName map[string]string, fldPath *field.Path) field.ErrorList { errList := field.ErrorList{} existingNodeName, found := ipToNodeName[addr.IP] if !found { @@ -4298,14 +4622,14 @@ func validateEpAddrNodeNameTransition(addr *api.EndpointAddress, ipToNodeName ma } // ValidateEndpoints tests if required fields are set. -func ValidateEndpoints(endpoints *api.Endpoints) field.ErrorList { +func ValidateEndpoints(endpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMeta(&endpoints.ObjectMeta, true, ValidateEndpointsName, field.NewPath("metadata")) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(endpoints.Annotations, field.NewPath("annotations"))...) - allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []api.EndpointSubset{}, field.NewPath("subsets"))...) + allErrs = append(allErrs, validateEndpointSubsets(endpoints.Subsets, []core.EndpointSubset{}, field.NewPath("subsets"))...) 
return allErrs } -func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.EndpointSubset, fldPath *field.Path) field.ErrorList { +func validateEndpointSubsets(subsets []core.EndpointSubset, oldSubsets []core.EndpointSubset, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} ipToNodeName := buildEndpointAddressNodeNameMap(oldSubsets) for i := range subsets { @@ -4331,7 +4655,7 @@ func validateEndpointSubsets(subsets []api.EndpointSubset, oldSubsets []api.Endp return allErrs } -func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { +func validateEndpointAddress(address *core.EndpointAddress, fldPath *field.Path, ipToNodeName map[string]string) field.ErrorList { allErrs := field.ErrorList{} for _, msg := range validation.IsValidIP(address.IP) { allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, msg)) @@ -4379,7 +4703,7 @@ func validateNonSpecialIP(ipAddress string, fldPath *field.Path) field.ErrorList return allErrs } -func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { +func validateEndpointPort(port *core.EndpointPort, requireName bool, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} if requireName && len(port.Name) == 0 { allErrs = append(allErrs, field.Required(fldPath.Child("name"), "")) @@ -4398,7 +4722,7 @@ func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *fie } // ValidateEndpointsUpdate tests to make sure an endpoints update can be applied. -func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.ErrorList { +func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *core.Endpoints) field.ErrorList { allErrs := ValidateObjectMetaUpdate(&newEndpoints.ObjectMeta, &oldEndpoints.ObjectMeta, field.NewPath("metadata")) allErrs = append(allErrs, validateEndpointSubsets(newEndpoints.Subsets, oldEndpoints.Subsets, field.NewPath("subsets"))...) allErrs = append(allErrs, ValidateEndpointsSpecificAnnotations(newEndpoints.Annotations, field.NewPath("annotations"))...) 
@@ -4406,9 +4730,9 @@ func ValidateEndpointsUpdate(newEndpoints, oldEndpoints *api.Endpoints) field.Er } // ValidateSecurityContext ensure the security context contains valid settings -func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field.ErrorList { +func ValidateSecurityContext(sc *core.SecurityContext, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} - //this should only be true for testing since SecurityContext is defaulted by the api + //this should only be true for testing since SecurityContext is defaulted by the core if sc == nil { return allErrs } @@ -4442,7 +4766,7 @@ func ValidateSecurityContext(sc *api.SecurityContext, fldPath *field.Path) field return allErrs } -func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { +func ValidatePodLogOptions(opts *core.PodLogOptions) field.ErrorList { allErrs := field.ErrorList{} if opts.TailLines != nil && *opts.TailLines < 0 { allErrs = append(allErrs, field.Invalid(field.NewPath("tailLines"), *opts.TailLines, isNegativeErrorMsg)) @@ -4462,7 +4786,7 @@ func ValidatePodLogOptions(opts *api.PodLogOptions) field.ErrorList { } // ValidateLoadBalancerStatus validates required fields on a LoadBalancerStatus -func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { +func ValidateLoadBalancerStatus(status *core.LoadBalancerStatus, fldPath *field.Path) field.ErrorList { allErrs := field.ErrorList{} for i, ingress := range status.Ingress { idxPath := fldPath.Child("ingress").Index(i) @@ -4483,7 +4807,7 @@ func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.P return allErrs } -func sysctlIntersection(a []api.Sysctl, b []api.Sysctl) []string { +func sysctlIntersection(a []core.Sysctl, b []core.Sysctl) []string { lookup := make(map[string]struct{}, len(a)) result := []string{} for i := range a { @@ -4503,7 +4827,7 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat na, err := helper.GetStorageNodeAffinityFromAnnotation(annotations) if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath, api.AlphaStorageNodeAffinityAnnotation, err.Error())) + allErrs = append(allErrs, field.Invalid(fldPath, core.AlphaStorageNodeAffinityAnnotation, err.Error())) return false, allErrs } if na == nil { @@ -4525,3 +4849,12 @@ func validateStorageNodeAffinityAnnotation(annotations map[string]string, fldPat } return policySpecified, allErrs } + +// ValidateCIDR validates whether a CIDR matches the conventions expected by net.ParseCIDR +func ValidateCIDR(cidr string) (*net.IPNet, error) { + _, net, err := net.ParseCIDR(cidr) + if err != nil { + return nil, err + } + return net, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go similarity index 77% rename from vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go rename to vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go index 1b5c7962..186760a1 100644 --- a/vendor/k8s.io/kubernetes/pkg/api/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/core/zz_generated.deepcopy.go @@ -18,750 +18,15 @@ limitations under the License. // This file was autogenerated by deepcopy-gen. Do not edit it manually! 
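The validation.go hunk above ends by introducing a small exported ValidateCIDR helper that simply wraps net.ParseCIDR and returns the parsed network. A minimal usage sketch follows; it is not part of the patch, the import path k8s.io/kubernetes/pkg/apis/core/validation is assumed from the pkg/api -> pkg/apis/core rename performed here, and the CIDR value is illustrative only.

```
package main

import (
	"fmt"

	// Assumed vendored path after the pkg/api -> pkg/apis/core rename in this patch.
	validation "k8s.io/kubernetes/pkg/apis/core/validation"
)

func main() {
	// ValidateCIDR returns the *net.IPNet for a well-formed CIDR, or the
	// net.ParseCIDR error otherwise.
	ipnet, err := validation.ValidateCIDR("10.96.0.0/12") // hypothetical service CIDR
	if err != nil {
		fmt.Println("invalid CIDR:", err)
		return
	}
	fmt.Println("parsed network:", ipnet.String()) // "10.96.0.0/12"
}
```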
-package api +package core import ( resource "k8s.io/apimachinery/pkg/api/resource" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" types "k8s.io/apimachinery/pkg/types" - reflect "reflect" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AWSElasticBlockStoreVolumeSource).DeepCopyInto(out.(*AWSElasticBlockStoreVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AWSElasticBlockStoreVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Affinity).DeepCopyInto(out.(*Affinity)) - return nil - }, InType: reflect.TypeOf(&Affinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AttachedVolume).DeepCopyInto(out.(*AttachedVolume)) - return nil - }, InType: reflect.TypeOf(&AttachedVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AvoidPods).DeepCopyInto(out.(*AvoidPods)) - return nil - }, InType: reflect.TypeOf(&AvoidPods{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureDiskVolumeSource).DeepCopyInto(out.(*AzureDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFilePersistentVolumeSource).DeepCopyInto(out.(*AzureFilePersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFilePersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AzureFileVolumeSource).DeepCopyInto(out.(*AzureFileVolumeSource)) - return nil - }, InType: reflect.TypeOf(&AzureFileVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Binding).DeepCopyInto(out.(*Binding)) - return nil - }, InType: reflect.TypeOf(&Binding{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Capabilities).DeepCopyInto(out.(*Capabilities)) - return nil - }, InType: reflect.TypeOf(&Capabilities{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSPersistentVolumeSource).DeepCopyInto(out.(*CephFSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CephFSVolumeSource).DeepCopyInto(out.(*CephFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CephFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CinderVolumeSource).DeepCopyInto(out.(*CinderVolumeSource)) - return nil - }, InType: reflect.TypeOf(&CinderVolumeSource{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ClientIPConfig).DeepCopyInto(out.(*ClientIPConfig)) - return nil - }, InType: reflect.TypeOf(&ClientIPConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentCondition).DeepCopyInto(out.(*ComponentCondition)) - return nil - }, InType: reflect.TypeOf(&ComponentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatus).DeepCopyInto(out.(*ComponentStatus)) - return nil - }, InType: reflect.TypeOf(&ComponentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ComponentStatusList).DeepCopyInto(out.(*ComponentStatusList)) - return nil - }, InType: reflect.TypeOf(&ComponentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMap).DeepCopyInto(out.(*ConfigMap)) - return nil - }, InType: reflect.TypeOf(&ConfigMap{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapEnvSource).DeepCopyInto(out.(*ConfigMapEnvSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapKeySelector).DeepCopyInto(out.(*ConfigMapKeySelector)) - return nil - }, InType: reflect.TypeOf(&ConfigMapKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapList).DeepCopyInto(out.(*ConfigMapList)) - return nil - }, InType: reflect.TypeOf(&ConfigMapList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapProjection).DeepCopyInto(out.(*ConfigMapProjection)) - return nil - }, InType: reflect.TypeOf(&ConfigMapProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ConfigMapVolumeSource).DeepCopyInto(out.(*ConfigMapVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ConfigMapVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Container).DeepCopyInto(out.(*Container)) - return nil - }, InType: reflect.TypeOf(&Container{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerImage).DeepCopyInto(out.(*ContainerImage)) - return nil - }, InType: reflect.TypeOf(&ContainerImage{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerPort).DeepCopyInto(out.(*ContainerPort)) - return nil - }, InType: reflect.TypeOf(&ContainerPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerState).DeepCopyInto(out.(*ContainerState)) - return nil - }, InType: reflect.TypeOf(&ContainerState{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateRunning).DeepCopyInto(out.(*ContainerStateRunning)) - return nil - }, InType: reflect.TypeOf(&ContainerStateRunning{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateTerminated).DeepCopyInto(out.(*ContainerStateTerminated)) - return nil - }, InType: reflect.TypeOf(&ContainerStateTerminated{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStateWaiting).DeepCopyInto(out.(*ContainerStateWaiting)) - return nil - }, InType: reflect.TypeOf(&ContainerStateWaiting{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ContainerStatus).DeepCopyInto(out.(*ContainerStatus)) - return nil - }, InType: reflect.TypeOf(&ContainerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonEndpoint).DeepCopyInto(out.(*DaemonEndpoint)) - return nil - }, InType: reflect.TypeOf(&DaemonEndpoint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeleteOptions).DeepCopyInto(out.(*DeleteOptions)) - return nil - }, InType: reflect.TypeOf(&DeleteOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIProjection).DeepCopyInto(out.(*DownwardAPIProjection)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeFile).DeepCopyInto(out.(*DownwardAPIVolumeFile)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeFile{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DownwardAPIVolumeSource).DeepCopyInto(out.(*DownwardAPIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&DownwardAPIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EmptyDirVolumeSource).DeepCopyInto(out.(*EmptyDirVolumeSource)) - return nil - }, InType: reflect.TypeOf(&EmptyDirVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointAddress).DeepCopyInto(out.(*EndpointAddress)) - return nil - }, InType: reflect.TypeOf(&EndpointAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointPort).DeepCopyInto(out.(*EndpointPort)) - return nil - }, InType: reflect.TypeOf(&EndpointPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointSubset).DeepCopyInto(out.(*EndpointSubset)) - return nil - }, InType: reflect.TypeOf(&EndpointSubset{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Endpoints).DeepCopyInto(out.(*Endpoints)) - return nil - }, InType: reflect.TypeOf(&Endpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EndpointsList).DeepCopyInto(out.(*EndpointsList)) - return nil - }, InType: reflect.TypeOf(&EndpointsList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvFromSource).DeepCopyInto(out.(*EnvFromSource)) - return nil - }, InType: reflect.TypeOf(&EnvFromSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*EnvVar).DeepCopyInto(out.(*EnvVar)) - return nil - }, InType: reflect.TypeOf(&EnvVar{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EnvVarSource).DeepCopyInto(out.(*EnvVarSource)) - return nil - }, InType: reflect.TypeOf(&EnvVarSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Event).DeepCopyInto(out.(*Event)) - return nil - }, InType: reflect.TypeOf(&Event{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventList).DeepCopyInto(out.(*EventList)) - return nil - }, InType: reflect.TypeOf(&EventList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*EventSource).DeepCopyInto(out.(*EventSource)) - return nil - }, InType: reflect.TypeOf(&EventSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ExecAction).DeepCopyInto(out.(*ExecAction)) - return nil - }, InType: reflect.TypeOf(&ExecAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FCVolumeSource).DeepCopyInto(out.(*FCVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FCVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlexVolumeSource).DeepCopyInto(out.(*FlexVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlexVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FlockerVolumeSource).DeepCopyInto(out.(*FlockerVolumeSource)) - return nil - }, InType: reflect.TypeOf(&FlockerVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GCEPersistentDiskVolumeSource).DeepCopyInto(out.(*GCEPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GCEPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GitRepoVolumeSource).DeepCopyInto(out.(*GitRepoVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GitRepoVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GlusterfsVolumeSource).DeepCopyInto(out.(*GlusterfsVolumeSource)) - return nil - }, InType: reflect.TypeOf(&GlusterfsVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPGetAction).DeepCopyInto(out.(*HTTPGetAction)) - return nil - }, InType: reflect.TypeOf(&HTTPGetAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPHeader).DeepCopyInto(out.(*HTTPHeader)) - return nil - }, InType: reflect.TypeOf(&HTTPHeader{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Handler).DeepCopyInto(out.(*Handler)) - return nil - }, InType: reflect.TypeOf(&Handler{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostAlias).DeepCopyInto(out.(*HostAlias)) - return nil - }, InType: reflect.TypeOf(&HostAlias{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPathVolumeSource).DeepCopyInto(out.(*HostPathVolumeSource)) - return nil - }, InType: reflect.TypeOf(&HostPathVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ISCSIVolumeSource).DeepCopyInto(out.(*ISCSIVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ISCSIVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*KeyToPath).DeepCopyInto(out.(*KeyToPath)) - return nil - }, InType: reflect.TypeOf(&KeyToPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Lifecycle).DeepCopyInto(out.(*Lifecycle)) - return nil - }, InType: reflect.TypeOf(&Lifecycle{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRange).DeepCopyInto(out.(*LimitRange)) - return nil - }, InType: reflect.TypeOf(&LimitRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeItem).DeepCopyInto(out.(*LimitRangeItem)) - return nil - }, InType: reflect.TypeOf(&LimitRangeItem{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeList).DeepCopyInto(out.(*LimitRangeList)) - return nil - }, InType: reflect.TypeOf(&LimitRangeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LimitRangeSpec).DeepCopyInto(out.(*LimitRangeSpec)) - return nil - }, InType: reflect.TypeOf(&LimitRangeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*List).DeepCopyInto(out.(*List)) - return nil - }, InType: reflect.TypeOf(&List{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ListOptions).DeepCopyInto(out.(*ListOptions)) - return nil - }, InType: reflect.TypeOf(&ListOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerIngress).DeepCopyInto(out.(*LoadBalancerIngress)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerIngress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LoadBalancerStatus).DeepCopyInto(out.(*LoadBalancerStatus)) - return nil - }, InType: reflect.TypeOf(&LoadBalancerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalObjectReference).DeepCopyInto(out.(*LocalObjectReference)) - return nil - }, InType: reflect.TypeOf(&LocalObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*LocalVolumeSource).DeepCopyInto(out.(*LocalVolumeSource)) - return nil - }, InType: reflect.TypeOf(&LocalVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NFSVolumeSource).DeepCopyInto(out.(*NFSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&NFSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*Namespace).DeepCopyInto(out.(*Namespace)) - return nil - }, InType: reflect.TypeOf(&Namespace{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceList).DeepCopyInto(out.(*NamespaceList)) - return nil - }, InType: reflect.TypeOf(&NamespaceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceSpec).DeepCopyInto(out.(*NamespaceSpec)) - return nil - }, InType: reflect.TypeOf(&NamespaceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NamespaceStatus).DeepCopyInto(out.(*NamespaceStatus)) - return nil - }, InType: reflect.TypeOf(&NamespaceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Node).DeepCopyInto(out.(*Node)) - return nil - }, InType: reflect.TypeOf(&Node{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAddress).DeepCopyInto(out.(*NodeAddress)) - return nil - }, InType: reflect.TypeOf(&NodeAddress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeAffinity).DeepCopyInto(out.(*NodeAffinity)) - return nil - }, InType: reflect.TypeOf(&NodeAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeCondition).DeepCopyInto(out.(*NodeCondition)) - return nil - }, InType: reflect.TypeOf(&NodeCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeConfigSource).DeepCopyInto(out.(*NodeConfigSource)) - return nil - }, InType: reflect.TypeOf(&NodeConfigSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeDaemonEndpoints).DeepCopyInto(out.(*NodeDaemonEndpoints)) - return nil - }, InType: reflect.TypeOf(&NodeDaemonEndpoints{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeList).DeepCopyInto(out.(*NodeList)) - return nil - }, InType: reflect.TypeOf(&NodeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeProxyOptions).DeepCopyInto(out.(*NodeProxyOptions)) - return nil - }, InType: reflect.TypeOf(&NodeProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeResources).DeepCopyInto(out.(*NodeResources)) - return nil - }, InType: reflect.TypeOf(&NodeResources{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelector).DeepCopyInto(out.(*NodeSelector)) - return nil - }, InType: reflect.TypeOf(&NodeSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorRequirement).DeepCopyInto(out.(*NodeSelectorRequirement)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorRequirement{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSelectorTerm).DeepCopyInto(out.(*NodeSelectorTerm)) - return nil - }, InType: reflect.TypeOf(&NodeSelectorTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSpec).DeepCopyInto(out.(*NodeSpec)) - return nil - }, InType: reflect.TypeOf(&NodeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeStatus).DeepCopyInto(out.(*NodeStatus)) - return nil - }, InType: reflect.TypeOf(&NodeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NodeSystemInfo).DeepCopyInto(out.(*NodeSystemInfo)) - return nil - }, InType: reflect.TypeOf(&NodeSystemInfo{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectFieldSelector).DeepCopyInto(out.(*ObjectFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ObjectFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectMeta).DeepCopyInto(out.(*ObjectMeta)) - return nil - }, InType: reflect.TypeOf(&ObjectMeta{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ObjectReference).DeepCopyInto(out.(*ObjectReference)) - return nil - }, InType: reflect.TypeOf(&ObjectReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolume).DeepCopyInto(out.(*PersistentVolume)) - return nil - }, InType: reflect.TypeOf(&PersistentVolume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaim).DeepCopyInto(out.(*PersistentVolumeClaim)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaim{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimCondition).DeepCopyInto(out.(*PersistentVolumeClaimCondition)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimList).DeepCopyInto(out.(*PersistentVolumeClaimList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimSpec).DeepCopyInto(out.(*PersistentVolumeClaimSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimStatus).DeepCopyInto(out.(*PersistentVolumeClaimStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeClaimVolumeSource).DeepCopyInto(out.(*PersistentVolumeClaimVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeClaimVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeList).DeepCopyInto(out.(*PersistentVolumeList)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*PersistentVolumeSource).DeepCopyInto(out.(*PersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeSpec).DeepCopyInto(out.(*PersistentVolumeSpec)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PersistentVolumeStatus).DeepCopyInto(out.(*PersistentVolumeStatus)) - return nil - }, InType: reflect.TypeOf(&PersistentVolumeStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PhotonPersistentDiskVolumeSource).DeepCopyInto(out.(*PhotonPersistentDiskVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PhotonPersistentDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Pod).DeepCopyInto(out.(*Pod)) - return nil - }, InType: reflect.TypeOf(&Pod{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinity).DeepCopyInto(out.(*PodAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAffinityTerm).DeepCopyInto(out.(*PodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&PodAffinityTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAntiAffinity).DeepCopyInto(out.(*PodAntiAffinity)) - return nil - }, InType: reflect.TypeOf(&PodAntiAffinity{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodAttachOptions).DeepCopyInto(out.(*PodAttachOptions)) - return nil - }, InType: reflect.TypeOf(&PodAttachOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodCondition).DeepCopyInto(out.(*PodCondition)) - return nil - }, InType: reflect.TypeOf(&PodCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodExecOptions).DeepCopyInto(out.(*PodExecOptions)) - return nil - }, InType: reflect.TypeOf(&PodExecOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodList).DeepCopyInto(out.(*PodList)) - return nil - }, InType: reflect.TypeOf(&PodList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodLogOptions).DeepCopyInto(out.(*PodLogOptions)) - return nil - }, InType: reflect.TypeOf(&PodLogOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodPortForwardOptions).DeepCopyInto(out.(*PodPortForwardOptions)) - return nil - }, InType: reflect.TypeOf(&PodPortForwardOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodProxyOptions).DeepCopyInto(out.(*PodProxyOptions)) - return nil - }, InType: reflect.TypeOf(&PodProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*PodSecurityContext).DeepCopyInto(out.(*PodSecurityContext)) - return nil - }, InType: reflect.TypeOf(&PodSecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSignature).DeepCopyInto(out.(*PodSignature)) - return nil - }, InType: reflect.TypeOf(&PodSignature{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSpec).DeepCopyInto(out.(*PodSpec)) - return nil - }, InType: reflect.TypeOf(&PodSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatus).DeepCopyInto(out.(*PodStatus)) - return nil - }, InType: reflect.TypeOf(&PodStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodStatusResult).DeepCopyInto(out.(*PodStatusResult)) - return nil - }, InType: reflect.TypeOf(&PodStatusResult{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplate).DeepCopyInto(out.(*PodTemplate)) - return nil - }, InType: reflect.TypeOf(&PodTemplate{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateList).DeepCopyInto(out.(*PodTemplateList)) - return nil - }, InType: reflect.TypeOf(&PodTemplateList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodTemplateSpec).DeepCopyInto(out.(*PodTemplateSpec)) - return nil - }, InType: reflect.TypeOf(&PodTemplateSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PortworxVolumeSource).DeepCopyInto(out.(*PortworxVolumeSource)) - return nil - }, InType: reflect.TypeOf(&PortworxVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Preconditions).DeepCopyInto(out.(*Preconditions)) - return nil - }, InType: reflect.TypeOf(&Preconditions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferAvoidPodsEntry).DeepCopyInto(out.(*PreferAvoidPodsEntry)) - return nil - }, InType: reflect.TypeOf(&PreferAvoidPodsEntry{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PreferredSchedulingTerm).DeepCopyInto(out.(*PreferredSchedulingTerm)) - return nil - }, InType: reflect.TypeOf(&PreferredSchedulingTerm{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Probe).DeepCopyInto(out.(*Probe)) - return nil - }, InType: reflect.TypeOf(&Probe{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ProjectedVolumeSource).DeepCopyInto(out.(*ProjectedVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ProjectedVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*QuobyteVolumeSource).DeepCopyInto(out.(*QuobyteVolumeSource)) - return nil - }, InType: reflect.TypeOf(&QuobyteVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDPersistentVolumeSource).DeepCopyInto(out.(*RBDPersistentVolumeSource)) - return nil - 
}, InType: reflect.TypeOf(&RBDPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RBDVolumeSource).DeepCopyInto(out.(*RBDVolumeSource)) - return nil - }, InType: reflect.TypeOf(&RBDVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RangeAllocation).DeepCopyInto(out.(*RangeAllocation)) - return nil - }, InType: reflect.TypeOf(&RangeAllocation{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationController).DeepCopyInto(out.(*ReplicationController)) - return nil - }, InType: reflect.TypeOf(&ReplicationController{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerCondition).DeepCopyInto(out.(*ReplicationControllerCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerList).DeepCopyInto(out.(*ReplicationControllerList)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerSpec).DeepCopyInto(out.(*ReplicationControllerSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerStatus).DeepCopyInto(out.(*ReplicationControllerStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceFieldSelector).DeepCopyInto(out.(*ResourceFieldSelector)) - return nil - }, InType: reflect.TypeOf(&ResourceFieldSelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuota).DeepCopyInto(out.(*ResourceQuota)) - return nil - }, InType: reflect.TypeOf(&ResourceQuota{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaList).DeepCopyInto(out.(*ResourceQuotaList)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaSpec).DeepCopyInto(out.(*ResourceQuotaSpec)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceQuotaStatus).DeepCopyInto(out.(*ResourceQuotaStatus)) - return nil - }, InType: reflect.TypeOf(&ResourceQuotaStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ResourceRequirements).DeepCopyInto(out.(*ResourceRequirements)) - return nil - }, InType: reflect.TypeOf(&ResourceRequirements{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxOptions).DeepCopyInto(out.(*SELinuxOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in 
interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOPersistentVolumeSource).DeepCopyInto(out.(*ScaleIOPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleIOVolumeSource).DeepCopyInto(out.(*ScaleIOVolumeSource)) - return nil - }, InType: reflect.TypeOf(&ScaleIOVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Secret).DeepCopyInto(out.(*Secret)) - return nil - }, InType: reflect.TypeOf(&Secret{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretEnvSource).DeepCopyInto(out.(*SecretEnvSource)) - return nil - }, InType: reflect.TypeOf(&SecretEnvSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretKeySelector).DeepCopyInto(out.(*SecretKeySelector)) - return nil - }, InType: reflect.TypeOf(&SecretKeySelector{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretList).DeepCopyInto(out.(*SecretList)) - return nil - }, InType: reflect.TypeOf(&SecretList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretProjection).DeepCopyInto(out.(*SecretProjection)) - return nil - }, InType: reflect.TypeOf(&SecretProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretReference).DeepCopyInto(out.(*SecretReference)) - return nil - }, InType: reflect.TypeOf(&SecretReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecretVolumeSource).DeepCopyInto(out.(*SecretVolumeSource)) - return nil - }, InType: reflect.TypeOf(&SecretVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SecurityContext).DeepCopyInto(out.(*SecurityContext)) - return nil - }, InType: reflect.TypeOf(&SecurityContext{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SerializedReference).DeepCopyInto(out.(*SerializedReference)) - return nil - }, InType: reflect.TypeOf(&SerializedReference{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Service).DeepCopyInto(out.(*Service)) - return nil - }, InType: reflect.TypeOf(&Service{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccount).DeepCopyInto(out.(*ServiceAccount)) - return nil - }, InType: reflect.TypeOf(&ServiceAccount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceAccountList).DeepCopyInto(out.(*ServiceAccountList)) - return nil - }, InType: reflect.TypeOf(&ServiceAccountList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceList).DeepCopyInto(out.(*ServiceList)) - return nil - }, InType: reflect.TypeOf(&ServiceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - 
in.(*ServicePort).DeepCopyInto(out.(*ServicePort)) - return nil - }, InType: reflect.TypeOf(&ServicePort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceProxyOptions).DeepCopyInto(out.(*ServiceProxyOptions)) - return nil - }, InType: reflect.TypeOf(&ServiceProxyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceSpec).DeepCopyInto(out.(*ServiceSpec)) - return nil - }, InType: reflect.TypeOf(&ServiceSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ServiceStatus).DeepCopyInto(out.(*ServiceStatus)) - return nil - }, InType: reflect.TypeOf(&ServiceStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SessionAffinityConfig).DeepCopyInto(out.(*SessionAffinityConfig)) - return nil - }, InType: reflect.TypeOf(&SessionAffinityConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSPersistentVolumeSource).DeepCopyInto(out.(*StorageOSPersistentVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSPersistentVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*StorageOSVolumeSource).DeepCopyInto(out.(*StorageOSVolumeSource)) - return nil - }, InType: reflect.TypeOf(&StorageOSVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Sysctl).DeepCopyInto(out.(*Sysctl)) - return nil - }, InType: reflect.TypeOf(&Sysctl{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*TCPSocketAction).DeepCopyInto(out.(*TCPSocketAction)) - return nil - }, InType: reflect.TypeOf(&TCPSocketAction{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Taint).DeepCopyInto(out.(*Taint)) - return nil - }, InType: reflect.TypeOf(&Taint{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Toleration).DeepCopyInto(out.(*Toleration)) - return nil - }, InType: reflect.TypeOf(&Toleration{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Volume).DeepCopyInto(out.(*Volume)) - return nil - }, InType: reflect.TypeOf(&Volume{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeMount).DeepCopyInto(out.(*VolumeMount)) - return nil - }, InType: reflect.TypeOf(&VolumeMount{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeProjection).DeepCopyInto(out.(*VolumeProjection)) - return nil - }, InType: reflect.TypeOf(&VolumeProjection{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VolumeSource).DeepCopyInto(out.(*VolumeSource)) - return nil - }, InType: reflect.TypeOf(&VolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*VsphereVirtualDiskVolumeSource).DeepCopyInto(out.(*VsphereVirtualDiskVolumeSource)) - return nil - }, InType: 
reflect.TypeOf(&VsphereVirtualDiskVolumeSource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*WeightedPodAffinityTerm).DeepCopyInto(out.(*WeightedPodAffinityTerm)) - return nil - }, InType: reflect.TypeOf(&WeightedPodAffinityTerm{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { *out = *in @@ -981,6 +246,22 @@ func (in *Binding) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -1425,6 +706,11 @@ func (in *Container) DeepCopyInto(out *Container) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } if in.LivenessProbe != nil { in, out := &in.LivenessProbe, &out.LivenessProbe if *in == nil { @@ -2097,6 +1383,25 @@ func (in *Event) DeepCopyInto(out *Event) { out.Source = in.Source in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + if *in == nil { + *out = nil + } else { + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + } + if in.Related != nil { + in, out := &in.Related, &out.Related + if *in == nil { + *out = nil + } else { + *out = new(ObjectReference) + **out = **in + } + } return } @@ -2153,6 +1458,23 @@ func (in *EventList) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EventSource) DeepCopyInto(out *EventSource) { *out = *in @@ -2448,6 +1770,45 @@ func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + if *in == nil { + *out = nil + } else { + *out = new(SecretReference) + **out = **in + } + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { *out = *in @@ -3604,6 +2965,15 @@ func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec **out = **in } } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -3773,7 +3143,7 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { if *in == nil { *out = nil } else { - *out = new(ISCSIVolumeSource) + *out = new(ISCSIPersistentVolumeSource) (*in).DeepCopyInto(*out) } } @@ -3894,6 +3264,15 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { (*in).DeepCopyInto(*out) } } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + if *in == nil { + *out = nil + } else { + *out = new(CSIPersistentVolumeSource) + **out = **in + } + } return } @@ -3937,6 +3316,15 @@ func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + if *in == nil { + *out = nil + } else { + *out = new(PersistentVolumeMode) + **out = **in + } + } return } @@ -4145,6 +3533,64 @@ func (in *PodCondition) DeepCopy() *PodCondition { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + if *in == nil { + *out = nil + } else { + *out = new(string) + **out = **in + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { *out = *in @@ -4515,6 +3961,15 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { **out = **in } } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + if *in == nil { + *out = nil + } else { + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + } return } @@ -6030,6 +5485,22 @@ func (in *Volume) DeepCopy() *Volume { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go index fbce8ee7..d97cffdb 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package package extensions // import "k8s.io/kubernetes/pkg/apis/extensions" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go index 7db398c6..48137fc6 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/register.go @@ -19,6 +19,7 @@ package extensions import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/apis/networking" ) @@ -51,19 +52,15 @@ func addKnownTypes(scheme *runtime.Scheme) error { &DeploymentList{}, &DeploymentRollback{}, &ReplicationControllerDummy{}, - &Scale{}, - &ThirdPartyResource{}, - &ThirdPartyResourceList{}, &DaemonSetList{}, &DaemonSet{}, - &ThirdPartyResourceData{}, - &ThirdPartyResourceDataList{}, &Ingress{}, &IngressList{}, &ReplicaSet{}, &ReplicaSetList{}, &PodSecurityPolicy{}, &PodSecurityPolicyList{}, + &autoscaling.Scale{}, &networking.NetworkPolicy{}, &networking.NetworkPolicyList{}, ) diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go index de4bbe37..e3697284 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/types.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( @@ -42,44 +42,6 @@ const ( SysctlsPodSecurityPolicyAnnotationKey string = "security.alpha.kubernetes.io/sysctls" ) -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 - - // label query over pods that should match the replicas count. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Dummy definition @@ -111,63 +73,8 @@ type CustomMetricCurrentStatusList struct { } // +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A ThirdPartyResource is a generic representation of a resource, it is used by add-ons and plugins to add new resource -// types to the API. It consists of one or more Versions of the api. 
-type ThirdPartyResource struct { - metav1.TypeMeta - - // Standard object metadata - // +optional - metav1.ObjectMeta - - // Description is the description of this object. - // +optional - Description string - - // Versions are versions for this third party object - Versions []APIVersion -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ThirdPartyResourceList struct { - metav1.TypeMeta - - // Standard list metadata. - // +optional - metav1.ListMeta - - // Items is the list of horizontal pod autoscalers. - Items []ThirdPartyResource -} - -// An APIVersion represents a single concrete version of an object model. -// TODO: we should consider merge this struct with GroupVersion in metav1.go -type APIVersion struct { - // Name of this version (e.g. 'v1'). - Name string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// An internal object, used for versioned storage in etcd. Not exposed to the end user. -type ThirdPartyResourceData struct { - metav1.TypeMeta - // Standard object metadata. - // +optional - metav1.ObjectMeta - - // Data is the raw JSON data for this data. - // +optional - Data []byte -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type Deployment struct { @@ -524,6 +431,27 @@ type DaemonSetStatus struct { // create the name for the newest ControllerRevision. // +optional CollisionCount *int32 + + // Represents the latest available observations of a DaemonSet's current state. + Conditions []DaemonSetCondition +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType + // Status of the condition, one of True, False, Unknown. + Status api.ConditionStatus + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time + // The reason for the condition's last transition. + Reason string + // A human readable message indicating details about the transition. 
+ Message string } // +genclient @@ -573,18 +501,6 @@ type DaemonSetList struct { Items []DaemonSet } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type ThirdPartyResourceDataList struct { - metav1.TypeMeta - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta - // Items is a list of third party objects - Items []ThirdPartyResourceData -} - // +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -755,8 +671,8 @@ type IngressBackend struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/kubernetes/pkg/apis/autoscaling.Scale,result=k8s.io/kubernetes/pkg/apis/autoscaling.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -888,7 +804,8 @@ type PodSecurityPolicySpec struct { Privileged bool // DefaultAddCapabilities is the default set of capabilities that will be added to the container // unless the pod spec specifically drops the capability. You may not list a capability in both - // DefaultAddCapabilities and RequiredDropCapabilities. + // DefaultAddCapabilities and RequiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the AllowedCapabilities list. // +optional DefaultAddCapabilities []api.Capability // RequiredDropCapabilities are the capabilities that will be dropped from the container. These @@ -943,6 +860,11 @@ type PodSecurityPolicySpec struct { // AllowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used. // +optional AllowedHostPaths []AllowedHostPath + // AllowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "Volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -962,9 +884,9 @@ type AllowedHostPath struct { // for pods to use. It requires both the start and end to be defined. type HostPortRange struct { // Min is the start of the range, inclusive. - Min int + Min int32 // Max is the end of the range, inclusive. - Max int + Max int32 } // AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities @@ -1002,9 +924,16 @@ var ( Projected FSType = "projected" PortworxVolume FSType = "portworxVolume" ScaleIO FSType = "scaleIO" + CSI FSType = "csi" All FSType = "*" ) +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // Driver is the name of the Flexvolume driver. + Driver string +} + // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. type SELinuxStrategyOptions struct { // Rule is the strategy that will dictate the allowable labels that may be set. 
diff --git a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go index d41048a5..ea801b3e 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/extensions/zz_generated.deepcopy.go @@ -22,253 +22,22 @@ package extensions import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*APIVersion).DeepCopyInto(out.(*APIVersion)) - return nil - }, InType: reflect.TypeOf(&APIVersion{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*AllowedHostPath).DeepCopyInto(out.(*AllowedHostPath)) - return nil - }, InType: reflect.TypeOf(&AllowedHostPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatus).DeepCopyInto(out.(*CustomMetricCurrentStatus)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricCurrentStatusList).DeepCopyInto(out.(*CustomMetricCurrentStatusList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricCurrentStatusList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTarget).DeepCopyInto(out.(*CustomMetricTarget)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTarget{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*CustomMetricTargetList).DeepCopyInto(out.(*CustomMetricTargetList)) - return nil - }, InType: reflect.TypeOf(&CustomMetricTargetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSet).DeepCopyInto(out.(*DaemonSet)) - return nil - }, InType: reflect.TypeOf(&DaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetList).DeepCopyInto(out.(*DaemonSetList)) - return nil - }, InType: reflect.TypeOf(&DaemonSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetSpec).DeepCopyInto(out.(*DaemonSetSpec)) - return nil - }, InType: reflect.TypeOf(&DaemonSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetStatus).DeepCopyInto(out.(*DaemonSetStatus)) - return nil - }, InType: reflect.TypeOf(&DaemonSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DaemonSetUpdateStrategy).DeepCopyInto(out.(*DaemonSetUpdateStrategy)) - 
return nil - }, InType: reflect.TypeOf(&DaemonSetUpdateStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Deployment).DeepCopyInto(out.(*Deployment)) - return nil - }, InType: reflect.TypeOf(&Deployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentCondition).DeepCopyInto(out.(*DeploymentCondition)) - return nil - }, InType: reflect.TypeOf(&DeploymentCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentList).DeepCopyInto(out.(*DeploymentList)) - return nil - }, InType: reflect.TypeOf(&DeploymentList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentRollback).DeepCopyInto(out.(*DeploymentRollback)) - return nil - }, InType: reflect.TypeOf(&DeploymentRollback{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentSpec).DeepCopyInto(out.(*DeploymentSpec)) - return nil - }, InType: reflect.TypeOf(&DeploymentSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStatus).DeepCopyInto(out.(*DeploymentStatus)) - return nil - }, InType: reflect.TypeOf(&DeploymentStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*DeploymentStrategy).DeepCopyInto(out.(*DeploymentStrategy)) - return nil - }, InType: reflect.TypeOf(&DeploymentStrategy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*FSGroupStrategyOptions).DeepCopyInto(out.(*FSGroupStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&FSGroupStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*GroupIDRange).DeepCopyInto(out.(*GroupIDRange)) - return nil - }, InType: reflect.TypeOf(&GroupIDRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressPath).DeepCopyInto(out.(*HTTPIngressPath)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressPath{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HTTPIngressRuleValue).DeepCopyInto(out.(*HTTPIngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&HTTPIngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*HostPortRange).DeepCopyInto(out.(*HostPortRange)) - return nil - }, InType: reflect.TypeOf(&HostPortRange{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Ingress).DeepCopyInto(out.(*Ingress)) - return nil - }, InType: reflect.TypeOf(&Ingress{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressBackend).DeepCopyInto(out.(*IngressBackend)) - return nil - }, InType: reflect.TypeOf(&IngressBackend{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressList).DeepCopyInto(out.(*IngressList)) - return nil - }, InType: reflect.TypeOf(&IngressList{})}, - 
conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRule).DeepCopyInto(out.(*IngressRule)) - return nil - }, InType: reflect.TypeOf(&IngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressRuleValue).DeepCopyInto(out.(*IngressRuleValue)) - return nil - }, InType: reflect.TypeOf(&IngressRuleValue{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressSpec).DeepCopyInto(out.(*IngressSpec)) - return nil - }, InType: reflect.TypeOf(&IngressSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressStatus).DeepCopyInto(out.(*IngressStatus)) - return nil - }, InType: reflect.TypeOf(&IngressStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IngressTLS).DeepCopyInto(out.(*IngressTLS)) - return nil - }, InType: reflect.TypeOf(&IngressTLS{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicy).DeepCopyInto(out.(*PodSecurityPolicy)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicyList).DeepCopyInto(out.(*PodSecurityPolicyList)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*PodSecurityPolicySpec).DeepCopyInto(out.(*PodSecurityPolicySpec)) - return nil - }, InType: reflect.TypeOf(&PodSecurityPolicySpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSet).DeepCopyInto(out.(*ReplicaSet)) - return nil - }, InType: reflect.TypeOf(&ReplicaSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetCondition).DeepCopyInto(out.(*ReplicaSetCondition)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetCondition{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetList).DeepCopyInto(out.(*ReplicaSetList)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetSpec).DeepCopyInto(out.(*ReplicaSetSpec)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicaSetStatus).DeepCopyInto(out.(*ReplicaSetStatus)) - return nil - }, InType: reflect.TypeOf(&ReplicaSetStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ReplicationControllerDummy).DeepCopyInto(out.(*ReplicationControllerDummy)) - return nil - }, InType: reflect.TypeOf(&ReplicationControllerDummy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollbackConfig).DeepCopyInto(out.(*RollbackConfig)) - return nil - }, InType: reflect.TypeOf(&RollbackConfig{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out 
interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDaemonSet).DeepCopyInto(out.(*RollingUpdateDaemonSet)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDaemonSet{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RollingUpdateDeployment).DeepCopyInto(out.(*RollingUpdateDeployment)) - return nil - }, InType: reflect.TypeOf(&RollingUpdateDeployment{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*RunAsUserStrategyOptions).DeepCopyInto(out.(*RunAsUserStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&RunAsUserStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SELinuxStrategyOptions).DeepCopyInto(out.(*SELinuxStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SELinuxStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*Scale).DeepCopyInto(out.(*Scale)) - return nil - }, InType: reflect.TypeOf(&Scale{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleSpec).DeepCopyInto(out.(*ScaleSpec)) - return nil - }, InType: reflect.TypeOf(&ScaleSpec{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ScaleStatus).DeepCopyInto(out.(*ScaleStatus)) - return nil - }, InType: reflect.TypeOf(&ScaleStatus{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*SupplementalGroupsStrategyOptions).DeepCopyInto(out.(*SupplementalGroupsStrategyOptions)) - return nil - }, InType: reflect.TypeOf(&SupplementalGroupsStrategyOptions{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResource).DeepCopyInto(out.(*ThirdPartyResource)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResource{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceData).DeepCopyInto(out.(*ThirdPartyResourceData)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceData{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceDataList).DeepCopyInto(out.(*ThirdPartyResourceDataList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceDataList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*ThirdPartyResourceList).DeepCopyInto(out.(*ThirdPartyResourceList)) - return nil - }, InType: reflect.TypeOf(&ThirdPartyResourceList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*UserIDRange).DeepCopyInto(out.(*UserIDRange)) - return nil - }, InType: reflect.TypeOf(&UserIDRange{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *APIVersion) DeepCopyInto(out *APIVersion) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersion. 
-func (in *APIVersion) DeepCopy() *APIVersion { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(APIVersion) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } @@ -398,6 +167,23 @@ func (in *DaemonSet) DeepCopyObject() runtime.Object { } } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in @@ -480,6 +266,13 @@ func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { **out = **in } } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } return } @@ -1101,17 +894,17 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = *in if in.DefaultAddCapabilities != nil { in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.RequiredDropCapabilities != nil { in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.AllowedCapabilities != nil { in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]api.Capability, len(*in)) + *out = make([]core.Capability, len(*in)) copy(*out, *in) } if in.Volumes != nil { @@ -1142,6 +935,11 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedHostPath, len(*in)) copy(*out, *in) } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } return } @@ -1390,7 +1188,7 @@ func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { if *in == nil { *out = nil } else { - *out = new(api.SELinuxOptions) + *out = new(core.SELinuxOptions) **out = **in } } @@ -1407,76 +1205,6 @@ func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. -func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - if *in == nil { - *out = nil - } else { - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { *out = *in @@ -1498,138 +1226,6 @@ func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrat return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResource) DeepCopyInto(out *ThirdPartyResource) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Versions != nil { - in, out := &in.Versions, &out.Versions - *out = make([]APIVersion, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResource. -func (in *ThirdPartyResource) DeepCopy() *ThirdPartyResource { - if in == nil { - return nil - } - out := new(ThirdPartyResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResource) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceData) DeepCopyInto(out *ThirdPartyResourceData) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceData. -func (in *ThirdPartyResourceData) DeepCopy() *ThirdPartyResourceData { - if in == nil { - return nil - } - out := new(ThirdPartyResourceData) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ThirdPartyResourceData) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceDataList) DeepCopyInto(out *ThirdPartyResourceDataList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResourceData, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceDataList. -func (in *ThirdPartyResourceDataList) DeepCopy() *ThirdPartyResourceDataList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceDataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceDataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ThirdPartyResourceList) DeepCopyInto(out *ThirdPartyResourceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ThirdPartyResource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ThirdPartyResourceList. -func (in *ThirdPartyResourceList) DeepCopy() *ThirdPartyResourceList { - if in == nil { - return nil - } - out := new(ThirdPartyResourceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ThirdPartyResourceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } else { - return nil - } -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *UserIDRange) DeepCopyInto(out *UserIDRange) { *out = *in diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go index b583051c..8b013e34 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/doc.go @@ -14,6 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:deepcopy-gen=package,register +// +k8s:deepcopy-gen=package // +groupName=networking.k8s.io package networking // import "k8s.io/kubernetes/pkg/apis/networking" diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go index 2109c374..ae37fcd5 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/types.go @@ -19,7 +19,7 @@ package networking import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // +genclient diff --git a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go index da38fb5e..aa5958ef 100644 --- a/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go +++ b/vendor/k8s.io/kubernetes/pkg/apis/networking/zz_generated.deepcopy.go @@ -22,58 +22,11 @@ package networking import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" intstr "k8s.io/apimachinery/pkg/util/intstr" - api "k8s.io/kubernetes/pkg/api" - reflect "reflect" + core "k8s.io/kubernetes/pkg/apis/core" ) -func init() { - SchemeBuilder.Register(RegisterDeepCopies) -} - -// RegisterDeepCopies adds deep-copy functions to the given scheme. Public -// to allow building arbitrary schemes. -// -// Deprecated: deepcopy registration will go away when static deepcopy is fully implemented. -func RegisterDeepCopies(scheme *runtime.Scheme) error { - return scheme.AddGeneratedDeepCopyFuncs( - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*IPBlock).DeepCopyInto(out.(*IPBlock)) - return nil - }, InType: reflect.TypeOf(&IPBlock{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicy).DeepCopyInto(out.(*NetworkPolicy)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicy{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyEgressRule).DeepCopyInto(out.(*NetworkPolicyEgressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyEgressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyIngressRule).DeepCopyInto(out.(*NetworkPolicyIngressRule)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyIngressRule{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyList).DeepCopyInto(out.(*NetworkPolicyList)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyList{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPeer).DeepCopyInto(out.(*NetworkPolicyPeer)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPeer{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicyPort).DeepCopyInto(out.(*NetworkPolicyPort)) - return nil - }, InType: reflect.TypeOf(&NetworkPolicyPort{})}, - conversion.GeneratedDeepCopyFunc{Fn: func(in interface{}, out interface{}, c *conversion.Cloner) error { - in.(*NetworkPolicySpec).DeepCopyInto(out.(*NetworkPolicySpec)) - return nil - }, 
InType: reflect.TypeOf(&NetworkPolicySpec{})}, - ) -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPBlock) DeepCopyInto(out *IPBlock) { *out = *in @@ -268,7 +221,7 @@ func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { if *in == nil { *out = nil } else { - *out = new(api.Protocol) + *out = new(core.Protocol) **out = **in } } diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md index 868a37ee..b59eba40 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/README.md @@ -4,7 +4,7 @@ The mechanism for supporting cloud providers is currently in transition: the original method of implementing cloud provider-specific functionality within the main kubernetes tree (here) is no longer advised; however, the proposed solution is still in development. #### Guidance for potential cloud providers: -* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). +* Support for cloud providers is currently in a state of flux. Background information on motivation and the proposal for improving is in the github [proposal](https://git.k8s.io/community/contributors/design-proposals/cloud-provider/cloud-provider-refactoring.md). * In support of this plan, a new cloud-controller-manager binary was added in 1.6. This was the first of several steps (see the proposal for more information). * Attempts to contribute new cloud providers or (to a lesser extent) persistent volumes to the core repo will likely meet with some pushback from reviewers/approvers. * It is understood that this is an unfortunate situation in which 'the old way is no longer supported but the new way is not ready yet', but the initial path is unsustainable, and contributors are encouraged to participate in the implementation of the proposed long-term solution, as there is risk that PRs for new cloud providers here will not be approved. @@ -13,4 +13,4 @@ The mechanism for supporting cloud providers is currently in transition: the or #### Some additional context on status / direction: * 1.6 added a new cloud-controller-manager binary that may be used for testing the new out-of-core cloudprovider flow. * Setting cloud-provider=external allows for creation of a separate controller-manager binary -* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. +* 1.7 adds [extensible admission control](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_extension.md), further enabling topology customization. diff --git a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go index 00479e9c..2e46d47d 100644 --- a/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go +++ b/vendor/k8s.io/kubernetes/pkg/cloudprovider/cloud.go @@ -23,6 +23,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/informers" "k8s.io/kubernetes/pkg/controller" ) @@ -49,6 +50,11 @@ type Interface interface { HasClusterID() bool } +type InformerUser interface { + // SetInformers sets the informer on the cloud object. 
+ SetInformers(informerFactory informers.SharedInformerFactory) +} + // Clusters is an abstract, pluggable interface for clusters of containers. type Clusters interface { // ListClusters lists the names of the available clusters. diff --git a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go index cb8121f6..04171762 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/client_builder.go @@ -28,14 +28,13 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" - clientgoclientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes" v1authentication "k8s.io/client-go/kubernetes/typed/authentication/v1" v1core "k8s.io/client-go/kubernetes/typed/core/v1" restclient "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/legacyscheme" + api "k8s.io/kubernetes/pkg/apis/core" "k8s.io/kubernetes/pkg/serviceaccount" "github.com/golang/glog" @@ -47,8 +46,8 @@ type ControllerClientBuilder interface { ConfigOrDie(name string) *restclient.Config Client(name string) (clientset.Interface, error) ClientOrDie(name string) clientset.Interface - ClientGoClient(name string) (clientgoclientset.Interface, error) - ClientGoClientOrDie(name string) clientgoclientset.Interface + ClientGoClient(name string) (clientset.Interface, error) + ClientGoClientOrDie(name string) clientset.Interface } // SimpleControllerClientBuilder returns a fixed client with different user agents @@ -86,15 +85,15 @@ func (b SimpleControllerClientBuilder) ClientOrDie(name string) clientset.Interf return client } -func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) { +func (b SimpleControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) { clientConfig, err := b.Config(name) if err != nil { return nil, err } - return clientgoclientset.NewForConfig(clientConfig) + return clientset.NewForConfig(clientConfig) } -func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface { +func (b SimpleControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface { client, err := b.ClientGoClient(name) if err != nil { glog.Fatal(err) @@ -276,15 +275,15 @@ func (b SAControllerClientBuilder) ClientOrDie(name string) clientset.Interface return client } -func (b SAControllerClientBuilder) ClientGoClient(name string) (clientgoclientset.Interface, error) { +func (b SAControllerClientBuilder) ClientGoClient(name string) (clientset.Interface, error) { clientConfig, err := b.Config(name) if err != nil { return nil, err } - return clientgoclientset.NewForConfig(clientConfig) + return clientset.NewForConfig(clientConfig) } -func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientgoclientset.Interface { +func (b SAControllerClientBuilder) ClientGoClientOrDie(name string) clientset.Interface { client, err := b.ClientGoClient(name) if err != nil { glog.Fatal(err) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 3eb1aa84..b662fa6f 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -42,9 +42,9 @@ import ( "k8s.io/client-go/tools/record" 
"k8s.io/client-go/util/integer" clientretry "k8s.io/client-go/util/retry" - _ "k8s.io/kubernetes/pkg/api/install" podutil "k8s.io/kubernetes/pkg/api/v1/pod" - "k8s.io/kubernetes/pkg/api/validation" + _ "k8s.io/kubernetes/pkg/apis/core/install" + "k8s.io/kubernetes/pkg/apis/core/validation" hashutil "k8s.io/kubernetes/pkg/util/hash" taintutils "k8s.io/kubernetes/pkg/util/taints" @@ -410,7 +410,7 @@ type RealRSControl struct { var _ RSControlInterface = &RealRSControl{} func (r RealRSControl) PatchReplicaSet(namespace, name string, data []byte) error { - _, err := r.KubeClient.Extensions().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) + _, err := r.KubeClient.ExtensionsV1beta1().ReplicaSets(namespace).Patch(name, types.StrategicMergePatchType, data) return err } @@ -1008,7 +1008,7 @@ func PatchNodeTaints(c clientset.Interface, nodeName string, oldNode *v1.Node, n return fmt.Errorf("failed to create patch for node %q: %v", nodeName, err) } - _, err = c.CoreV1().Nodes().Patch(string(nodeName), types.StrategicMergePatchType, patchBytes) + _, err = c.CoreV1().Nodes().Patch(nodeName, types.StrategicMergePatchType, patchBytes) return err } diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go index 197397c4..80599039 100644 --- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go +++ b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go @@ -98,7 +98,7 @@ const ( // the API server as the certificate approaches expiration. RotateKubeletClientCertificate utilfeature.Feature = "RotateKubeletClientCertificate" - // owner: @msau + // owner: @msau42 // alpha: v1.7 // // A new volume type that supports local disks on a node. @@ -140,12 +140,6 @@ const ( // 'MemoryPressure', 'OutOfDisk' and 'DiskPressure'. TaintNodesByCondition utilfeature.Feature = "TaintNodesByCondition" - // owner: @haibinxie - // alpha: v1.8 - // - // Implement IPVS-based in-cluster service load balancing - SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" - // owner: @jsafrane // alpha: v1.8 // @@ -169,6 +163,55 @@ const ( // // Enable nodes to exclude themselves from service load balancers ServiceNodeExclusion utilfeature.Feature = "ServiceNodeExclusion" + + // owner: @jsafrane + // alpha: v1.9 + // + // Enable running mount utilities in containers. + MountContainers utilfeature.Feature = "MountContainers" + + // owner: @msau42 + // alpha: v1.9 + // + // Extend the default scheduler to be aware of PV topology and handle PV binding + // Before moving to beta, resolve Kubernetes issue #56180 + VolumeScheduling utilfeature.Feature = "VolumeScheduling" + + // owner: @vladimirvivien + // alpha: v1.9 + // + // Enable mount/attachment of Container Storage Interface (CSI) backed PVs + CSIPersistentVolume utilfeature.Feature = "CSIPersistentVolume" + + // owner @MrHohn + // alpha: v1.9 + // + // Support configurable pod DNS parameters. + CustomPodDNS utilfeature.Feature = "CustomPodDNS" + + // owner: @screeley44 + // alpha: v1.9 + // + // Enable Block volume support in containers. 
+ BlockVolume utilfeature.Feature = "BlockVolume" + + // owner: @pospispa + // + // alpha: v1.9 + // Postpone deletion of a persistent volume claim in case it is used by a pod + PVCProtection utilfeature.Feature = "PVCProtection" + + // owner: @aveshagarwal + // alpha: v1.9 + // + // Enable resource limits priority function + ResourceLimitsPriorityFunction utilfeature.Feature = "ResourceLimitsPriorityFunction" + + // owner: @m1093782566 + // beta: v1.9 + // + // Implement IPVS-based in-cluster service load balancing + SupportIPVSProxyMode utilfeature.Feature = "SupportIPVSProxyMode" ) func init() { @@ -201,6 +244,14 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS ExpandPersistentVolumes: {Default: false, PreRelease: utilfeature.Alpha}, CPUManager: {Default: false, PreRelease: utilfeature.Alpha}, ServiceNodeExclusion: {Default: false, PreRelease: utilfeature.Alpha}, + MountContainers: {Default: false, PreRelease: utilfeature.Alpha}, + VolumeScheduling: {Default: false, PreRelease: utilfeature.Alpha}, + CSIPersistentVolume: {Default: false, PreRelease: utilfeature.Alpha}, + CustomPodDNS: {Default: false, PreRelease: utilfeature.Alpha}, + BlockVolume: {Default: false, PreRelease: utilfeature.Alpha}, + PVCProtection: {Default: false, PreRelease: utilfeature.Alpha}, + ResourceLimitsPriorityFunction: {Default: false, PreRelease: utilfeature.Alpha}, + SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Beta}, // inherited features from generic apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: @@ -212,6 +263,5 @@ var defaultKubernetesFeatureGates = map[utilfeature.Feature]utilfeature.FeatureS // inherited features from apiextensions-apiserver, relisted here to get a conflict if it is changed // unintentionally on either side: - apiextensionsfeatures.CustomResourceValidation: {Default: false, PreRelease: utilfeature.Alpha}, - SupportIPVSProxyMode: {Default: false, PreRelease: utilfeature.Alpha}, + apiextensionsfeatures.CustomResourceValidation: {Default: true, PreRelease: utilfeature.Beta}, } diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go new file mode 100644 index 00000000..400d001e --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fieldpath supplies methods for extracting fields from objects +// given a path to a field. +package fieldpath // import "k8s.io/kubernetes/pkg/fieldpath" diff --git a/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go new file mode 100644 index 00000000..43754145 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/fieldpath/fieldpath.go @@ -0,0 +1,103 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fieldpath + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/util/validation" +) + +// FormatMap formats map[string]string to a string. +func FormatMap(m map[string]string) (fmtStr string) { + for key, value := range m { + fmtStr += fmt.Sprintf("%v=%q\n", key, value) + } + fmtStr = strings.TrimSuffix(fmtStr, "\n") + + return +} + +// ExtractFieldPathAsString extracts the field from the given object +// and returns it as a string. The object must be a pointer to an +// API type. +func ExtractFieldPathAsString(obj interface{}, fieldPath string) (string, error) { + accessor, err := meta.Accessor(obj) + if err != nil { + return "", nil + } + + if path, subscript, ok := SplitMaybeSubscriptedPath(fieldPath); ok { + switch path { + case "metadata.annotations": + if errs := validation.IsQualifiedName(strings.ToLower(subscript)); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetAnnotations()[subscript], nil + case "metadata.labels": + if errs := validation.IsQualifiedName(subscript); len(errs) != 0 { + return "", fmt.Errorf("invalid key subscript in %s: %s", fieldPath, strings.Join(errs, ";")) + } + return accessor.GetLabels()[subscript], nil + default: + return "", fmt.Errorf("fieldPath %q does not support subscript", fieldPath) + } + } + + switch fieldPath { + case "metadata.annotations": + return FormatMap(accessor.GetAnnotations()), nil + case "metadata.labels": + return FormatMap(accessor.GetLabels()), nil + case "metadata.name": + return accessor.GetName(), nil + case "metadata.namespace": + return accessor.GetNamespace(), nil + case "metadata.uid": + return string(accessor.GetUID()), nil + } + + return "", fmt.Errorf("unsupported fieldPath: %v", fieldPath) +} + +// SplitMaybeSubscriptedPath checks whether the specified fieldPath is +// subscripted, and +// - if yes, this function splits the fieldPath into path and subscript, and +// returns (path, subscript, true). +// - if no, this function returns (fieldPath, "", false). 
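`FormatMap`, `ExtractFieldPathAsString`, and `SplitMaybeSubscriptedPath` above back the downward API: a field path may carry a single `['key']` subscript for annotations and labels. A minimal standalone sketch of the same splitting rule (reimplemented here for illustration, not imported from the vendored package):

```go
package main

import (
	"fmt"
	"strings"
)

// splitMaybeSubscriptedPath mirrors the logic added above: a path is
// subscripted only if it ends in "']" and contains a matching "['".
func splitMaybeSubscriptedPath(fieldPath string) (path, subscript string, ok bool) {
	if !strings.HasSuffix(fieldPath, "']") {
		return fieldPath, "", false
	}
	s := strings.TrimSuffix(fieldPath, "']")
	parts := strings.SplitN(s, "['", 2)
	if len(parts) < 2 || parts[0] == "" {
		return fieldPath, "", false
	}
	return parts[0], parts[1], true
}

func main() {
	for _, p := range []string{
		"metadata.annotations['myKey']",
		"metadata.labels['']",
		"metadata.name",
	} {
		path, sub, ok := splitMaybeSubscriptedPath(p)
		fmt.Printf("%-35s -> (%q, %q, %v)\n", p, path, sub, ok)
	}
}
```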
+// +// Example inputs and outputs: +// - "metadata.annotations['myKey']" --> ("metadata.annotations", "myKey", true) +// - "metadata.annotations['a[b]c']" --> ("metadata.annotations", "a[b]c", true) +// - "metadata.labels['']" --> ("metadata.labels", "", true) +// - "metadata.labels" --> ("metadata.labels", "", false) +func SplitMaybeSubscriptedPath(fieldPath string) (string, string, bool) { + if !strings.HasSuffix(fieldPath, "']") { + return fieldPath, "", false + } + s := strings.TrimSuffix(fieldPath, "']") + parts := strings.SplitN(s, "['", 2) + if len(parts) < 2 { + return fieldPath, "", false + } + if len(parts[0]) == 0 { + return fieldPath, "", false + } + return parts[0], parts[1], true +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go index 27c42c5c..04cac264 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime/constants.go @@ -25,3 +25,31 @@ const ( // NetworkReady means the runtime network is up and ready to accept containers which require network. NetworkReady = "NetworkReady" ) + +// LogStreamType is the type of the stream in CRI container log. +type LogStreamType string + +const ( + // Stdout is the stream type for stdout. + Stdout LogStreamType = "stdout" + // Stderr is the stream type for stderr. + Stderr LogStreamType = "stderr" +) + +// LogTag is the tag of a log line in CRI container log. +// Currently defined log tags: +// * First tag: Partial/End - P/E. +// The field in the container log format can be extended to include multiple +// tags by using a delimiter, but changes should be rare. If it becomes clear +// that better extensibility is desired, a more extensible format (e.g., json) +// should be adopted as a replacement and/or addition. +type LogTag string + +const ( + // LogTagPartial means the line is part of multiple lines. + LogTagPartial LogTag = "P" + // LogTagFull means the line is a single full line or the end of multiple lines. + LogTagFull LogTag = "F" + // LogTagDelimiter is the delimiter for different log tags. + LogTagDelimiter = ":" +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go index df1b274f..32dbc745 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/helpers.go @@ -46,8 +46,8 @@ type HandlerRunner interface { // RuntimeHelper wraps kubelet to make container runtime // able to get necessary informations like the RunContainerOptions, DNS settings, Host IP. type RuntimeHelper interface { - GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, useClusterFirstPolicy bool, err error) - GetClusterDNS(pod *v1.Pod) (dnsServers []string, dnsSearches []string, useClusterFirstPolicy bool, err error) + GenerateRunContainerOptions(pod *v1.Pod, container *v1.Container, podIP string) (contOpts *RunContainerOptions, err error) + GetPodDNS(pod *v1.Pod) (dnsConfig *runtimeapi.DNSConfig, err error) // GetPodCgroupParent returns the CgroupName identifer, and its literal cgroupfs form on the host // of a pod. 
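The new `LogStreamType` and `LogTag` constants above describe the fields of a CRI container log line; assuming the usual `timestamp stream tag message` layout in which these tags are consumed, a reader can stitch partial lines back together. A hypothetical parser sketch (the line format here is an assumption, not spelled out in the hunk):

```go
package main

import (
	"fmt"
	"strings"
)

// logEntry is a hypothetical decoded CRI log line.
type logEntry struct {
	Timestamp string
	Stream    string // "stdout" or "stderr"
	Partial   bool   // true when the tag's first field is "P"
	Message   string
}

// parseCRILogLine assumes a "timestamp stream tag message" layout. The tag
// may carry extra fields separated by ':'; only the first (P or F) is used.
func parseCRILogLine(line string) (logEntry, error) {
	parts := strings.SplitN(line, " ", 4)
	if len(parts) < 4 {
		return logEntry{}, fmt.Errorf("malformed log line: %q", line)
	}
	tag := strings.SplitN(parts[2], ":", 2)[0]
	return logEntry{
		Timestamp: parts[0],
		Stream:    parts[1],
		Partial:   tag == "P",
		Message:   parts[3],
	}, nil
}

func main() {
	e, err := parseCRILogLine("2017-12-01T15:37:38.000000000Z stdout P partial line without newline")
	fmt.Printf("%+v err=%v\n", e, err)
}
```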
GetPodCgroupParent(pod *v1.Pod) string diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go b/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go index f545b41a..2f5f4e3d 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/container/runtime.go @@ -428,10 +428,6 @@ type RunContainerOptions struct { // this directory will be used to create and mount the log file to // container.TerminationMessagePath PodContainerDir string - // The list of DNS servers for the container to use. - DNS []string - // The list of DNS search domains. - DNSSearch []string // The parent cgroup to pass to Docker CgroupParent string // The type of container rootfs @@ -450,9 +446,14 @@ type RunContainerOptions struct { type VolumeInfo struct { // Mounter is the volume's mounter Mounter volume.Mounter + // BlockVolumeMapper is the Block volume's mapper + BlockVolumeMapper volume.BlockVolumeMapper // SELinuxLabeled indicates whether this volume has had the // pod's SELinux label applied to it or not SELinuxLabeled bool + // Whether the volume permission is set to read-only or not + // This value is passed from volume.spec + ReadOnly bool } type VolumeMap map[string]VolumeInfo diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go index 42f7c681..ab28a1d8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/fake_iptables.go @@ -22,6 +22,7 @@ import ( "net" "strings" + "k8s.io/apimachinery/pkg/util/sets" utiliptables "k8s.io/kubernetes/pkg/util/iptables" ) @@ -36,12 +37,18 @@ type fakeTable struct { } type fakeIPTables struct { - tables map[string]*fakeTable + tables map[string]*fakeTable + builtinChains map[string]sets.String } func NewFakeIPTables() *fakeIPTables { return &fakeIPTables{ tables: make(map[string]*fakeTable, 0), + builtinChains: map[string]sets.String{ + string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"), + string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"), + string(utiliptables.TableMangle): sets.NewString("PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"), + }, } } @@ -246,6 +253,7 @@ func (f *fakeIPTables) SaveInto(tableName utiliptables.Table, buffer *bytes.Buff } func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag) error { + allLines := string(data) buf := bytes.NewBuffer(data) var tableName utiliptables.Table for { @@ -274,6 +282,13 @@ func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, } } _, _ = f.ensureChain(tableName, chainName) + // The --noflush option for iptables-restore doesn't work for user-defined chains, only builtin chains. 
+ // We should flush user-defined chains if the chain is not to be deleted + if !f.isBuiltinChain(tableName, chainName) && !strings.Contains(allLines, "-X "+string(chainName)) { + if err := f.FlushChain(tableName, chainName); err != nil { + return err + } + } } else if strings.HasPrefix(line, "-A") { parts := strings.Split(line, " ") if len(parts) < 3 { @@ -329,3 +344,10 @@ func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) { func (f *fakeIPTables) Destroy() { } + +func (f *fakeIPTables) isBuiltinChain(tableName utiliptables.Table, chainName utiliptables.Chain) bool { + if builtinChains, ok := f.builtinChains[string(tableName)]; ok && builtinChains.Has(string(chainName)) { + return true + } + return false +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go index ea53c9c8..a31b4da0 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_manager.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "strconv" "strings" "sync" @@ -177,6 +178,8 @@ func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (er chainsToRemove := []utiliptables.Chain{} for _, pm := range hostportMappings { chainsToRemove = append(chainsToRemove, getHostportChain(id, pm)) + // TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 + chainsToRemove = append(chainsToRemove, getBuggyHostportChain(id, pm)) } // remove rules that consists of target chains @@ -247,6 +250,16 @@ func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error // WARNING: Please do not change this function. Otherwise, HostportManager may not be able to // identify existing iptables chains. func getHostportChain(id string, pm *PortMapping) utiliptables.Chain { + hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol))) + encoded := base32.StdEncoding.EncodeToString(hash[:]) + return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) +} + +// This bugy func does bad conversion on HostPort from int32 to string. +// It may generates same chain names for different ports of the same pod, e.g. port 57119/55429/56833. +// `getHostportChain` fixed this bug. In order to cleanup the legacy chains/rules, it is temporarily left. +// TODO remove this after release 1.9, please refer https://github.com/kubernetes/kubernetes/pull/55153 +func getBuggyHostportChain(id string, pm *PortMapping) utiliptables.Chain { hash := sha256.Sum256([]byte(id + string(pm.HostPort) + string(pm.Protocol))) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go index 0086b745..3d7bfd6e 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/network/hostport/hostport_syncer.go @@ -21,6 +21,7 @@ import ( "crypto/sha256" "encoding/base32" "fmt" + "strconv" "strings" "time" @@ -142,7 +143,7 @@ func writeLine(buf *bytes.Buffer, words ...string) { // this because IPTables Chain Names must be <= 28 chars long, and the longer // they are the harder they are to read. 
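`getBuggyHostportChain` above is kept only so chains produced by the old `string(pm.HostPort)` conversion can still be found and removed: converting an int32 with `string()` yields the Unicode code point for that number rather than its decimal text, and the example ports 55429/56833/57119 all fall in the UTF-16 surrogate range, so they collapse to the replacement character and hash to the same chain name. A small sketch of the difference and of the sha256/base32 naming scheme used above (the chain prefix is illustrative):

```go
package main

import (
	"crypto/sha256"
	"encoding/base32"
	"fmt"
	"strconv"
)

// chainName mirrors the fixed scheme above: hash the inputs and keep the
// first 16 characters of the base32 encoding. The "KUBE-HP-" prefix is
// illustrative.
func chainName(id string, hostPort int32, protocol string) string {
	hash := sha256.Sum256([]byte(id + strconv.Itoa(int(hostPort)) + protocol))
	return "KUBE-HP-" + base32.StdEncoding.EncodeToString(hash[:])[:16]
}

func main() {
	// string(rune(p)) interprets the port as a code point; these values sit
	// in the surrogate range and all become "\ufffd", which is why the old
	// conversion could hash identical input for different ports.
	for _, p := range []int32{55429, 56833, 57119} {
		fmt.Printf("port %d: string()=%q strconv.Itoa()=%q chain=%s\n",
			p, string(rune(p)), strconv.Itoa(int(p)), chainName("pod-uid", p, "tcp"))
	}
}
```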
func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain { - hash := sha256.Sum256([]byte(string(pm.HostPort) + string(pm.Protocol) + podFullName)) + hash := sha256.Sum256([]byte(strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + podFullName)) encoded := base32.StdEncoding.EncodeToString(hash[:]) return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16]) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go index 5f872c82..919ed5a7 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/httpstream.go @@ -28,7 +28,7 @@ import ( "k8s.io/apimachinery/pkg/util/httpstream" "k8s.io/apimachinery/pkg/util/httpstream/spdy" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go index cb4bca04..b445c922 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/portforward/websocket.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/server/httplog" "k8s.io/apiserver/pkg/util/wsstream" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go index 9d488321..74f0595c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/server/remotecommand/httpstream.go @@ -32,7 +32,7 @@ import ( "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/util/wsstream" "k8s.io/client-go/tools/remotecommand" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go index 2b95b3b1..6bfc3dac 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/types/pod_update.go @@ -21,7 +21,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - kubeapi "k8s.io/kubernetes/pkg/api" + kubeapi "k8s.io/kubernetes/pkg/apis/core" ) const ( @@ -49,6 +49,8 @@ const ( // Pods with the given ids have unexpected status in this source, // kubelet should reconcile status with this source RECONCILE + // Pods with the given ids have been restored from a checkpoint. 
+ RESTORE // These constants identify the sources of pods // Updates from a file diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go index 7ea31d0a..7ee1da6f 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/healthcheck/healthcheck.go @@ -33,7 +33,7 @@ import ( "k8s.io/apimachinery/pkg/util/clock" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) var nodeHealthzRetryInterval = 60 * time.Second diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go index 786434f7..1710d989 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/iptables/proxier.go @@ -41,9 +41,9 @@ import ( "k8s.io/apimachinery/pkg/util/wait" utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/tools/record" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" apiservice "k8s.io/kubernetes/pkg/api/service" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/proxy" "k8s.io/kubernetes/pkg/proxy/healthcheck" @@ -80,6 +80,9 @@ const ( // the mark-for-drop chain KubeMarkDropChain utiliptables.Chain = "KUBE-MARK-DROP" + + // the kubernetes forward chain + kubeForwardChain utiliptables.Chain = "KUBE-FORWARD" ) // IPTablesVersioner can query the current iptables version. @@ -440,11 +443,6 @@ func NewProxier(ipt utiliptables.Interface, recorder record.EventRecorder, healthzServer healthcheck.HealthzUpdater, ) (*Proxier, error) { - // check valid user input - if minSyncPeriod > syncPeriod { - return nil, fmt.Errorf("minSyncPeriod (%v) must be <= syncPeriod (%v)", minSyncPeriod, syncPeriod) - } - // Set the route_localnet sysctl we need for if err := sysctl.SetSysctl(sysctlRouteLocalnet, 1); err != nil { return nil, fmt.Errorf("can't set sysctl %s: %v", sysctlRouteLocalnet, err) @@ -458,9 +456,6 @@ func NewProxier(ipt utiliptables.Interface, } // Generate the masquerade mark to use for SNAT rules. - if masqueradeBit < 0 || masqueradeBit > 31 { - return nil, fmt.Errorf("invalid iptables-masquerade-bit %v not in [0, 31]", masqueradeBit) - } masqueradeValue := 1 << uint(masqueradeBit) masqueradeMark := fmt.Sprintf("%#08x/%#08x", masqueradeValue, masqueradeValue) @@ -543,6 +538,18 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { } } + // Unlink the forwarding chain. + args = []string{ + "-m", "comment", "--comment", "kubernetes forwarding rules", + "-j", string(kubeForwardChain), + } + if err := ipt.DeleteRule(utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil { + if !utiliptables.IsNotFoundError(err) { + glog.Errorf("Error removing pure-iptables proxy rule: %v", err) + encounteredError = true + } + } + // Flush and remove all of our chains. 
iptablesData := bytes.NewBuffer(nil) if err := ipt.SaveInto(utiliptables.TableNAT, iptablesData); err != nil { @@ -578,14 +585,28 @@ func CleanupLeftovers(ipt utiliptables.Interface) (encounteredError bool) { encounteredError = true } } - { - filterBuf := bytes.NewBuffer(nil) - writeLine(filterBuf, "*filter") - writeLine(filterBuf, fmt.Sprintf(":%s - [0:0]", kubeServicesChain)) - writeLine(filterBuf, fmt.Sprintf("-X %s", kubeServicesChain)) - writeLine(filterBuf, "COMMIT") + + // Flush and remove all of our chains. + iptablesData = bytes.NewBuffer(nil) + if err := ipt.SaveInto(utiliptables.TableFilter, iptablesData); err != nil { + glog.Errorf("Failed to execute iptables-save for %s: %v", utiliptables.TableFilter, err) + encounteredError = true + } else { + existingFilterChains := utiliptables.GetChainLines(utiliptables.TableFilter, iptablesData.Bytes()) + filterChains := bytes.NewBuffer(nil) + filterRules := bytes.NewBuffer(nil) + writeLine(filterChains, "*filter") + for _, chain := range []utiliptables.Chain{kubeServicesChain, kubeForwardChain} { + if _, found := existingFilterChains[chain]; found { + chainString := string(chain) + writeLine(filterChains, existingFilterChains[chain]) + writeLine(filterRules, "-X", chainString) + } + } + writeLine(filterRules, "COMMIT") + filterLines := append(filterChains.Bytes(), filterRules.Bytes()...) // Write it. - if err := ipt.Restore(utiliptables.TableFilter, filterBuf.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { + if err := ipt.Restore(utiliptables.TableFilter, filterLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters); err != nil { glog.Errorf("Failed to execute iptables-restore for %s: %v", utiliptables.TableFilter, err) encounteredError = true } @@ -798,7 +819,7 @@ func getLocalIPs(endpointsMap proxyEndpointsMap) map[types.NamespacedName]sets.S for svcPortName := range endpointsMap { for _, ep := range endpointsMap[svcPortName] { if ep.isLocal { - // If the endpoint has a bad format, ipPart() will log an + // If the endpoint has a bad format, utilproxy.IPPart() will log an // error and ep.IPPart() will return a null string. if ip := ep.IPPart(); ip != "" { nsn := svcPortName.NamespacedName @@ -1027,6 +1048,21 @@ func (proxier *Proxier) syncProxyRules() { } } + // Create and link the kube forward chain. + { + if _, err := proxier.iptables.EnsureChain(utiliptables.TableFilter, kubeForwardChain); err != nil { + glog.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableFilter, kubeForwardChain, err) + return + } + + comment := "kubernetes forward rules" + args := []string{"-m", "comment", "--comment", comment, "-j", string(kubeForwardChain)} + if _, err := proxier.iptables.EnsureRule(utiliptables.Prepend, utiliptables.TableFilter, utiliptables.ChainForward, args...); err != nil { + glog.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", utiliptables.TableFilter, utiliptables.ChainForward, kubeForwardChain, err) + return + } + } + // // Below this point we will not return until we try to write the iptables rules. 
// @@ -1069,6 +1105,11 @@ func (proxier *Proxier) syncProxyRules() { } else { writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeServicesChain)) } + if chain, ok := existingFilterChains[kubeForwardChain]; ok { + writeLine(proxier.filterChains, chain) + } else { + writeLine(proxier.filterChains, utiliptables.MakeChainLine(kubeForwardChain)) + } if chain, ok := existingNATChains[kubeServicesChain]; ok { writeLine(proxier.natChains, chain) } else { @@ -1516,6 +1557,18 @@ func (proxier *Proxier) syncProxyRules() { ) writeLine(proxier.natRules, args...) } else { + // First write session affinity rules only over local endpoints, if applicable. + if svcInfo.sessionAffinityType == api.ServiceAffinityClientIP { + for _, endpointChain := range localEndpointChains { + writeLine(proxier.natRules, + "-A", string(svcXlbChain), + "-m", "comment", "--comment", svcNameString, + "-m", "recent", "--name", string(endpointChain), + "--rcheck", "--seconds", strconv.Itoa(svcInfo.stickyMaxAgeSeconds), "--reap", + "-j", string(endpointChain)) + } + } + // Setup probability filter rules only over local endpoints for i, endpointChain := range localEndpointChains { // Balancing rules in the per-service chain. @@ -1562,6 +1615,40 @@ func (proxier *Proxier) syncProxyRules() { "-m", "addrtype", "--dst-type", "LOCAL", "-j", string(kubeNodePortsChain)) + // If the masqueradeMark has been added then we want to forward that same + // traffic, this allows NodePort traffic to be forwarded even if the default + // FORWARD policy is not accept. + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "comment", "--comment", `"kubernetes forwarding rules"`, + "-m", "mark", "--mark", proxier.masqueradeMark, + "-j", "ACCEPT", + ) + + // The following rules can only be set if clusterCIDR has been defined. + if len(proxier.clusterCIDR) != 0 { + // The following two rules ensure the traffic after the initial packet + // accepted by the "kubernetes forwarding rules" rule above will be + // accepted, to be as specific as possible the traffic must be sourced + // or destined to the clusterCIDR (to/from a pod). + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-s", proxier.clusterCIDR, + "-m", "comment", "--comment", `"kubernetes forwarding conntrack pod source rule"`, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + ) + writeLine(proxier.filterRules, + "-A", string(kubeForwardChain), + "-m", "comment", "--comment", `"kubernetes forwarding conntrack pod destination rule"`, + "-d", proxier.clusterCIDR, + "-m", "conntrack", + "--ctstate", "RELATED,ESTABLISHED", + "-j", "ACCEPT", + ) + } + // Write the end-of-table markers. writeLine(proxier.filterRules, "COMMIT") writeLine(proxier.natRules, "COMMIT") @@ -1609,7 +1696,7 @@ func (proxier *Proxier) syncProxyRules() { // Finish housekeeping. // TODO: these could be made more consistent. 
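The KUBE-FORWARD rules written above let NodePort and pod traffic pass even when the host's default FORWARD policy is DROP: packets carrying the masquerade mark are accepted outright, and established/related conntrack flows sourced from or destined to the cluster CIDR are accepted as well. A rough sketch of the restore payload those `writeLine` calls produce (values are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"strings"
)

// writeLine mirrors the helper used above: join words and append a newline.
func writeLine(buf *bytes.Buffer, words ...string) {
	buf.WriteString(strings.Join(words, " ") + "\n")
}

func main() {
	// Illustrative values; the real proxier derives these from its config.
	const (
		forwardChain   = "KUBE-FORWARD"
		masqueradeMark = "0x00004000/0x00004000"
		clusterCIDR    = "10.244.0.0/16"
	)

	rules := bytes.NewBuffer(nil)
	writeLine(rules, "-A", forwardChain,
		"-m", "comment", "--comment", `"kubernetes forwarding rules"`,
		"-m", "mark", "--mark", masqueradeMark,
		"-j", "ACCEPT")
	writeLine(rules, "-A", forwardChain,
		"-s", clusterCIDR,
		"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED",
		"-j", "ACCEPT")
	writeLine(rules, "-A", forwardChain,
		"-d", clusterCIDR,
		"-m", "conntrack", "--ctstate", "RELATED,ESTABLISHED",
		"-j", "ACCEPT")
	fmt.Print(rules.String())
}
```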
- for _, svcIP := range staleServices.List() { + for _, svcIP := range staleServices.UnsortedList() { if err := utilproxy.ClearUDPConntrackForIP(proxier.exec, svcIP); err != nil { glog.Errorf("Failed to delete stale service IP %s connections, error: %v", svcIP, err) } diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go index 7806c3a1..d99d61b5 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/conntrack.go @@ -27,7 +27,7 @@ import ( // Utilities for dealing with conntrack -const noConnectionToDelete = "0 flow entries have been deleted" +const NoConnectionToDelete = "0 flow entries have been deleted" func IsIPv6(netIP net.IP) bool { return netIP != nil && netIP.To4() == nil @@ -50,7 +50,7 @@ func parametersWithFamily(isIPv6 bool, parameters ...string) []string { func ClearUDPConntrackForIP(execer exec.Interface, ip string) error { parameters := parametersWithFamily(IsIPv6String(ip), "-D", "--orig-dst", ip, "-p", "udp") err := ExecConntrackTool(execer, parameters...) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { // TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed. // These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it // is expensive to baby-sit all udp connections to kubernetes services. @@ -84,7 +84,7 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) erro } parameters := parametersWithFamily(isIPv6, "-D", "-p", "udp", "--dport", strconv.Itoa(port)) err := ExecConntrackTool(execer, parameters...) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { return fmt.Errorf("error deleting conntrack entries for UDP port: %d, error: %v", port, err) } return nil @@ -95,7 +95,7 @@ func ClearUDPConntrackForPort(execer exec.Interface, port int, isIPv6 bool) erro func ClearUDPConntrackForPeers(execer exec.Interface, origin, dest string) error { parameters := parametersWithFamily(IsIPv6String(origin), "-D", "--orig-dst", origin, "--dst-nat", dest, "-p", "udp") err := ExecConntrackTool(execer, parameters...) - if err != nil && !strings.Contains(err.Error(), noConnectionToDelete) { + if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) { // TODO: Better handling for deletion failure. When failure occur, stale udp connection may not get flushed. // These stale udp connection will keep black hole traffic. Making this a best effort operation for now, since it // is expensive to baby sit all udp connections to kubernetes services. 
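`NoConnectionToDelete` is exported above so callers outside the package can also treat conntrack's "0 flow entries have been deleted" exit as benign. A minimal sketch of that error-handling pattern (the conntrack invocation itself is stubbed out here):

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// NoConnectionToDelete is the benign conntrack message matched above.
const NoConnectionToDelete = "0 flow entries have been deleted"

// clearUDPConntrackForIP sketches the pattern: a run that deleted nothing
// is not a failure worth reporting.
func clearUDPConntrackForIP(run func(args ...string) error, ip string) error {
	err := run("-D", "--orig-dst", ip, "-p", "udp")
	if err != nil && !strings.Contains(err.Error(), NoConnectionToDelete) {
		return fmt.Errorf("error deleting conntrack entries for %s: %v", ip, err)
	}
	return nil
}

func main() {
	// Stubbed conntrack call reporting the benign "nothing deleted" case.
	stub := func(args ...string) error {
		return errors.New("conntrack v1.4.4: " + NoConnectionToDelete)
	}
	fmt.Println("err:", clearUDPConntrackForIP(stub, "10.0.0.1"))
}
```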
diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go index 32e770d4..449e1126 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/endpoints.go @@ -19,6 +19,7 @@ package util import ( "fmt" "net" + "strconv" "github.com/golang/glog" ) @@ -32,12 +33,33 @@ func IPPart(s string) string { return s } // Must be IP:port - ip, _, err := net.SplitHostPort(s) + host, _, err := net.SplitHostPort(s) if err != nil { glog.Errorf("Error parsing '%s': %v", s, err) return "" } - return ip + // Check if host string is a valid IP address + if ip := net.ParseIP(host); ip != nil { + return ip.String() + } else { + glog.Errorf("invalid IP part '%s'", host) + } + return "" +} + +func PortPart(s string) (int, error) { + // Must be IP:port + _, port, err := net.SplitHostPort(s) + if err != nil { + glog.Errorf("Error parsing '%s': %v", s, err) + return -1, err + } + portNumber, err := strconv.Atoi(port) + if err != nil { + glog.Errorf("Error parsing '%s': %v", port, err) + return -1, err + } + return portNumber, nil } // ToCIDR returns a host address of the form /32 for diff --git a/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go b/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go index 81734f54..cac0140c 100644 --- a/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go +++ b/vendor/k8s.io/kubernetes/pkg/proxy/util/utils.go @@ -20,8 +20,8 @@ import ( "net" "k8s.io/apimachinery/pkg/types" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" "github.com/golang/glog" ) diff --git a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go index df4dc3a9..0503c151 100644 --- a/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go +++ b/vendor/k8s.io/kubernetes/pkg/serviceaccount/util.go @@ -20,7 +20,7 @@ import ( "k8s.io/api/core/v1" apiserverserviceaccount "k8s.io/apiserver/pkg/authentication/serviceaccount" "k8s.io/apiserver/pkg/authentication/user" - "k8s.io/kubernetes/pkg/api" + api "k8s.io/kubernetes/pkg/apis/core" ) // UserInfo returns a user.Info interface for the given namespace, service account name and UID diff --git a/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go b/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go index 40e6ab2f..292288d2 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go +++ b/vendor/k8s.io/kubernetes/pkg/util/iptables/iptables.go @@ -82,6 +82,7 @@ type Table string const ( TableNAT Table = "nat" TableFilter Table = "filter" + TableMangle Table = "mangle" ) type Chain string @@ -91,6 +92,7 @@ const ( ChainPrerouting Chain = "PREROUTING" ChainOutput Chain = "OUTPUT" ChainInput Chain = "INPUT" + ChainForward Chain = "FORWARD" ) const ( @@ -592,7 +594,7 @@ func getIPTablesRestoreWaitFlag(exec utilexec.Interface, protocol Protocol) []st return nil } - return []string{"--wait=2"} + return []string{WaitSecondsString} } // getIPTablesRestoreVersionString runs "iptables-restore --version" to get the version string diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go new file mode 100644 index 00000000..1dedc5b7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount.go @@ -0,0 +1,140 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "fmt" + + "github.com/golang/glog" +) + +// ExecMounter is a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. +type execMounter struct { + wrappedMounter Interface + exec Exec +} + +func NewExecMounter(exec Exec, wrapped Interface) Interface { + return &execMounter{ + wrappedMounter: wrapped, + exec: exec, + } +} + +// execMounter implements mount.Interface +var _ Interface = &execMounter{} + +// Mount runs mount(8) using given exec interface. +func (m *execMounter) Mount(source string, target string, fstype string, options []string) error { + bind, bindRemountOpts := isBind(options) + + if bind { + err := m.doExecMount(source, target, fstype, []string{"bind"}) + if err != nil { + return err + } + return m.doExecMount(source, target, fstype, bindRemountOpts) + } + + return m.doExecMount(source, target, fstype, options) +} + +// doExecMount calls exec(mount ) using given exec interface. +func (m *execMounter) doExecMount(source, target, fstype string, options []string) error { + glog.V(5).Infof("Exec Mounting %s %s %s %v", source, target, fstype, options) + mountArgs := makeMountArgs(source, target, fstype, options) + output, err := m.exec.Run("mount", mountArgs...) + glog.V(5).Infof("Exec mounted %v: %v: %s", mountArgs, err, string(output)) + if err != nil { + return fmt.Errorf("mount failed: %v\nMounting command: %s\nMounting arguments: %s %s %s %v\nOutput: %s\n", + err, "mount", source, target, fstype, options, string(output)) + } + + return err +} + +// Unmount runs umount(8) using given exec interface. +func (m *execMounter) Unmount(target string) error { + outputBytes, err := m.exec.Run("umount", target) + if err == nil { + glog.V(5).Infof("Exec unmounted %s: %s", target, string(outputBytes)) + } else { + glog.V(5).Infof("Failed to exec unmount %s: err: %q, umount output: %s", target, err, string(outputBytes)) + } + + return err +} + +// List returns a list of all mounted filesystems. +func (m *execMounter) List() ([]MountPoint, error) { + return m.wrappedMounter.List() +} + +// IsLikelyNotMountPoint determines whether a path is a mountpoint. +func (m *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { + return m.wrappedMounter.IsLikelyNotMountPoint(file) +} + +// DeviceOpened checks if block device in use by calling Open with O_EXCL flag. +// Returns true if open returns errno EBUSY, and false if errno is nil. +// Returns an error if errno is any error other than EBUSY. +// Returns with error if pathname is not a device. +func (m *execMounter) DeviceOpened(pathname string) (bool, error) { + return m.wrappedMounter.DeviceOpened(pathname) +} + +// PathIsDevice uses FileInfo returned from os.Stat to check if path refers +// to a device. 
+func (m *execMounter) PathIsDevice(pathname string) (bool, error) { + return m.wrappedMounter.PathIsDevice(pathname) +} + +//GetDeviceNameFromMount given a mount point, find the volume id from checking /proc/mounts +func (m *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return m.wrappedMounter.GetDeviceNameFromMount(mountPath, pluginDir) +} + +func (m *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return m.wrappedMounter.IsMountPointMatch(mp, dir) +} + +func (m *execMounter) IsNotMountPoint(dir string) (bool, error) { + return m.wrappedMounter.IsNotMountPoint(dir) +} + +func (m *execMounter) MakeRShared(path string) error { + return m.wrappedMounter.MakeRShared(path) +} + +func (m *execMounter) GetFileType(pathname string) (FileType, error) { + return m.wrappedMounter.GetFileType(pathname) +} + +func (m *execMounter) MakeFile(pathname string) error { + return m.wrappedMounter.MakeFile(pathname) +} + +func (m *execMounter) MakeDir(pathname string) error { + return m.wrappedMounter.MakeDir(pathname) +} + +func (m *execMounter) ExistsPath(pathname string) bool { + return m.wrappedMounter.ExistsPath(pathname) +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go new file mode 100644 index 00000000..136704b2 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/exec_mount_unsupported.go @@ -0,0 +1,87 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package mount + +import ( + "errors" +) + +type execMounter struct{} + +// ExecMounter is a mounter that uses provided Exec interface to mount and +// unmount a filesystem. For all other calls it uses a wrapped mounter. 
+func NewExecMounter(exec Exec, wrapped Interface) Interface { + return &execMounter{} +} + +func (mounter *execMounter) Mount(source string, target string, fstype string, options []string) error { + return nil +} + +func (mounter *execMounter) Unmount(target string) error { + return nil +} + +func (mounter *execMounter) List() ([]MountPoint, error) { + return []MountPoint{}, nil +} + +func (mounter *execMounter) IsMountPointMatch(mp MountPoint, dir string) bool { + return (mp.Path == dir) +} + +func (mounter *execMounter) IsNotMountPoint(dir string) (bool, error) { + return IsNotMountPoint(mounter, dir) +} + +func (mounter *execMounter) IsLikelyNotMountPoint(file string) (bool, error) { + return true, nil +} + +func (mounter *execMounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) { + return "", nil +} + +func (mounter *execMounter) DeviceOpened(pathname string) (bool, error) { + return false, nil +} + +func (mounter *execMounter) PathIsDevice(pathname string) (bool, error) { + return true, nil +} + +func (mounter *execMounter) MakeRShared(path string) error { + return nil +} + +func (mounter *execMounter) GetFileType(pathname string) (FileType, error) { + return FileType("fake"), errors.New("not implemented") +} + +func (mounter *execMounter) MakeDir(pathname string) error { + return nil +} + +func (mounter *execMounter) MakeFile(pathname string) error { + return nil +} + +func (mounter *execMounter) ExistsPath(pathname string) bool { + return true +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go index 9caec670..71064f32 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_linux.go @@ -498,7 +498,7 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, if mountErr != nil { // Mount failed. This indicates either that the disk is unformatted or // it contains an unexpected filesystem. - existingFormat, err := mounter.getDiskFormat(source) + existingFormat, err := mounter.GetDiskFormat(source) if err != nil { return err } @@ -536,8 +536,8 @@ func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, return mountErr } -// getDiskFormat uses 'lsblk' to see if the given disk is unformated -func (mounter *SafeFormatAndMount) getDiskFormat(disk string) (string, error) { +// GetDiskFormat uses 'lsblk' to see if the given disk is unformated +func (mounter *SafeFormatAndMount) GetDiskFormat(disk string) (string, error) { args := []string{"-n", "-o", "FSTYPE", disk} glog.V(4).Infof("Attempting to determine if disk %q is formatted using lsblk with args: (%v)", disk, args) dataOut, err := mounter.Exec.Run("lsblk", args...) 
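The `execMounter` above only overrides Mount and Unmount, shelling out through the injected Exec and delegating every inspection call to the wrapped mounter; `GetDiskFormat` is likewise made public so callers can probe a device with lsblk before formatting. A stripped-down sketch of that exec-and-probe pattern (the types and the device path are local stand-ins, not the vendored mount package):

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// runner is a stand-in for the injected Exec interface used above.
type runner func(cmd string, args ...string) ([]byte, error)

// diskFormat probes a device with lsblk, mirroring GetDiskFormat: an empty
// FSTYPE column means the disk looks unformatted.
func diskFormat(run runner, disk string) (string, error) {
	out, err := run("lsblk", "-n", "-o", "FSTYPE", disk)
	if err != nil {
		return "", fmt.Errorf("lsblk failed on %s: %v", disk, err)
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	run := func(cmd string, args ...string) ([]byte, error) {
		return exec.Command(cmd, args...).CombinedOutput()
	}
	// "/dev/sda1" is illustrative; this needs lsblk and a real block device.
	if fstype, err := diskFormat(run, "/dev/sda1"); err == nil {
		fmt.Printf("fstype: %q (empty means unformatted)\n", fstype)
	} else {
		fmt.Println("probe failed:", err)
	}
}
```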
diff --git a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go index 244c22c0..87d1e374 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go +++ b/vendor/k8s.io/kubernetes/pkg/util/mount/mount_unsupported.go @@ -86,7 +86,7 @@ func (mounter *Mounter) MakeRShared(path string) error { } func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error { - return nil + return mounter.Interface.Mount(source, target, fstype, options) } func (mounter *SafeFormatAndMount) diskLooksUnformatted(disk string) (bool, error) { diff --git a/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go b/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go index 1a10939b..a970bf7f 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go +++ b/vendor/k8s.io/kubernetes/pkg/util/pointer/pointer.go @@ -60,3 +60,9 @@ func Int32PtrDerefOr(ptr *int32, def int32) int32 { } return def } + +// BoolPtr returns a pointer to a bool +func BoolPtr(b bool) *bool { + o := b + return &o +} diff --git a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go index a86839b5..76e4bb86 100644 --- a/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go +++ b/vendor/k8s.io/kubernetes/pkg/util/taints/taints.go @@ -25,8 +25,8 @@ import ( utilerrors "k8s.io/apimachinery/pkg/util/errors" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/kubernetes/pkg/api" - "k8s.io/kubernetes/pkg/api/helper" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/core/helper" ) const ( diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go index 1ec55d23..42ceaa40 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go @@ -208,6 +208,26 @@ type ExpandableVolumePlugin interface { RequiresFSResize() bool } +// BlockVolumePlugin is an extend interface of VolumePlugin and is used for block volumes support. +type BlockVolumePlugin interface { + VolumePlugin + // NewBlockVolumeMapper creates a new volume.BlockVolumeMapper from an API specification. + // Ownership of the spec pointer in *not* transferred. + // - spec: The v1.Volume spec + // - pod: The enclosing pod + NewBlockVolumeMapper(spec *Spec, podRef *v1.Pod, opts VolumeOptions) (BlockVolumeMapper, error) + // NewBlockVolumeUnmapper creates a new volume.BlockVolumeUnmapper from recoverable state. + // - name: The volume name, as per the v1.Volume spec. + // - podUID: The UID of the enclosing pod + NewBlockVolumeUnmapper(name string, podUID types.UID) (BlockVolumeUnmapper, error) + // ConstructBlockVolumeSpec constructs a volume spec based on the given + // podUID, volume name and a pod device map path. + // The spec may have incomplete information due to limited information + // from input. This function is used by volume manager to reconstruct + // volume spec by reading the volume directories from disk. + ConstructBlockVolumeSpec(podUID types.UID, volumeName, mountPath string) (*Spec, error) +} + // VolumeHost is an interface that plugins can use to access the kubelet. type VolumeHost interface { // GetPluginDir returns the absolute path to a directory under which @@ -216,6 +236,11 @@ type VolumeHost interface { // GetPodPluginDir(). 
GetPluginDir(pluginName string) string + // GetVolumeDevicePluginDir returns the absolute path to a directory + // under which a given plugin may store data. + // ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/ + GetVolumeDevicePluginDir(pluginName string) string + // GetPodVolumeDir returns the absolute path a directory which // represents the named volume under the named plugin for the given // pod. If the specified pod does not exist, the result of this call @@ -228,6 +253,13 @@ type VolumeHost interface { // directory might not actually exist on disk yet. GetPodPluginDir(podUID types.UID, pluginName string) string + // GetPodVolumeDeviceDir returns the absolute path a directory which + // represents the named plugin for the given pod. + // If the specified pod does not exist, the result of this call + // might not exist. + // ex. pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/ + GetPodVolumeDeviceDir(podUID types.UID, pluginName string) string + // GetKubeClient returns a client interface GetKubeClient() clientset.Interface @@ -271,6 +303,9 @@ type VolumeHost interface { // Returns the labels on the node GetNodeLabels() (map[string]string, error) + + // Returns the name of the node + GetNodeName() types.NodeName } // VolumePluginMgr tracks registered plugins. @@ -675,6 +710,32 @@ func (pm *VolumePluginMgr) FindExpandablePluginByName(name string) (ExpandableVo return nil, nil } +// FindMapperPluginBySpec fetches a block volume plugin by spec. +func (pm *VolumePluginMgr) FindMapperPluginBySpec(spec *Spec) (BlockVolumePlugin, error) { + volumePlugin, err := pm.FindPluginBySpec(spec) + if err != nil { + return nil, err + } + + if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok { + return blockVolumePlugin, nil + } + return nil, nil +} + +// FindMapperPluginByName fetches a block volume plugin by name. +func (pm *VolumePluginMgr) FindMapperPluginByName(name string) (BlockVolumePlugin, error) { + volumePlugin, err := pm.FindPluginByName(name) + if err != nil { + return nil, err + } + + if blockVolumePlugin, ok := volumePlugin.(BlockVolumePlugin); ok { + return blockVolumePlugin, nil + } + return nil, nil +} + // NewPersistentVolumeRecyclerPodTemplate creates a template for a recycler // pod. By default, a recycler pod simply runs "rm -rf" on a volume and tests // for emptiness. Most attributes of the template will be correct for most diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/error.go b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go new file mode 100644 index 00000000..2c965586 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/error.go @@ -0,0 +1,41 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + k8stypes "k8s.io/apimachinery/pkg/types" +) + +// This error on attach indicates volume is attached to a different node +// than we expected. 
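`FindMapperPluginBySpec` and `FindMapperPluginByName` above discover block support by type-asserting the generic plugin to `BlockVolumePlugin` and returning nil when the capability is absent. The same optional-interface pattern in miniature:

```go
package main

import "fmt"

// volumePlugin is the baseline capability every plugin has.
type volumePlugin interface{ Name() string }

// blockVolumePlugin is the optional capability probed for above.
type blockVolumePlugin interface {
	volumePlugin
	MapDevice() error
}

type fsOnly struct{}

func (fsOnly) Name() string { return "fs-only" }

type withBlock struct{}

func (withBlock) Name() string     { return "with-block" }
func (withBlock) MapDevice() error { return nil }

// findMapper mirrors FindMapperPluginByName: a nil result (with no error)
// means the plugin exists but does not support block volumes.
func findMapper(p volumePlugin) blockVolumePlugin {
	if bp, ok := p.(blockVolumePlugin); ok {
		return bp
	}
	return nil
}

func main() {
	for _, p := range []volumePlugin{fsOnly{}, withBlock{}} {
		fmt.Printf("%s supports block volumes: %v\n", p.Name(), findMapper(p) != nil)
	}
}
```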
+type DanglingAttachError struct { + msg string + CurrentNode k8stypes.NodeName + DevicePath string +} + +func (err *DanglingAttachError) Error() string { + return err.msg +} + +func NewDanglingError(msg string, node k8stypes.NodeName, devicePath string) error { + return &DanglingAttachError{ + msg: msg, + CurrentNode: node, + DevicePath: devicePath, + } +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go new file mode 100644 index 00000000..84631545 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/finalizer.go @@ -0,0 +1,68 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "k8s.io/api/core/v1" +) + +const ( + // Name of finalizer on PVCs that have a running pod. + PVCProtectionFinalizer = "kubernetes.io/pvc-protection" +) + +// IsPVCBeingDeleted returns: +// true: in case PVC is being deleted, i.e. ObjectMeta.DeletionTimestamp is set +// false: in case PVC is not being deleted, i.e. ObjectMeta.DeletionTimestamp is nil +func IsPVCBeingDeleted(pvc *v1.PersistentVolumeClaim) bool { + return pvc.ObjectMeta.DeletionTimestamp != nil +} + +// IsProtectionFinalizerPresent returns true in case PVCProtectionFinalizer is +// present among the pvc.Finalizers +func IsProtectionFinalizerPresent(pvc *v1.PersistentVolumeClaim) bool { + for _, finalizer := range pvc.Finalizers { + if finalizer == PVCProtectionFinalizer { + return true + } + } + return false +} + +// RemoveProtectionFinalizer returns pvc without PVCProtectionFinalizer in case +// it's present in pvc.Finalizers. It expects that pvc is writable (i.e. is not +// informer's cached copy.) +func RemoveProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { + newFinalizers := make([]string, 0) + for _, finalizer := range pvc.Finalizers { + if finalizer != PVCProtectionFinalizer { + newFinalizers = append(newFinalizers, finalizer) + } + } + if len(newFinalizers) == 0 { + // Sanitize for unit tests so we don't need to distinguish empty array + // and nil. + newFinalizers = nil + } + pvc.Finalizers = newFinalizers +} + +// AddProtectionFinalizer adds PVCProtectionFinalizer to pvc. It expects that +// pvc is writable (i.e. is not informer's cached copy.) 
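The finalizer helpers above implement PVC protection: while a pod still uses a claim, `kubernetes.io/pvc-protection` stays in `Finalizers`, and deletion (signalled by `DeletionTimestamp`) is held back until the finalizer is removed. A standalone sketch of the add/check/remove cycle over a plain string slice (no client-go types, for illustration only):

```go
package main

import "fmt"

const pvcProtectionFinalizer = "kubernetes.io/pvc-protection"

// hasFinalizer mirrors IsProtectionFinalizerPresent.
func hasFinalizer(finalizers []string, f string) bool {
	for _, cur := range finalizers {
		if cur == f {
			return true
		}
	}
	return false
}

// removeFinalizer mirrors RemoveProtectionFinalizer, returning nil rather
// than an empty slice when nothing is left.
func removeFinalizer(finalizers []string, f string) []string {
	var kept []string
	for _, cur := range finalizers {
		if cur != f {
			kept = append(kept, cur)
		}
	}
	return kept
}

func main() {
	finalizers := []string{"example.com/other"}
	finalizers = append(finalizers, pvcProtectionFinalizer) // AddProtectionFinalizer
	fmt.Println("protected:", hasFinalizer(finalizers, pvcProtectionFinalizer))
	finalizers = removeFinalizer(finalizers, pvcProtectionFinalizer)
	fmt.Println("after removal:", finalizers)
}
```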
+func AddProtectionFinalizer(pvc *v1.PersistentVolumeClaim) { + pvc.Finalizers = append(pvc.Finalizers, PVCProtectionFinalizer) +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go index ad4ac6a0..106036df 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/util/util.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util.go @@ -21,7 +21,7 @@ import ( "io/ioutil" "os" "path" - + "path/filepath" "strings" "github.com/golang/glog" @@ -30,15 +30,23 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/api/legacyscheme" - v1helper "k8s.io/kubernetes/pkg/api/v1/helper" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis" "k8s.io/kubernetes/pkg/util/mount" ) -const readyFileName = "ready" +const ( + readyFileName = "ready" + losetupPath = "losetup" + + ErrDeviceNotFound = "device not found" + ErrDeviceNotSupported = "device not supported" + ErrNotAvailable = "not available" +) // IsReady checks for the existence of a regular file // called 'ready' in the given directory and returns @@ -233,7 +241,7 @@ func LoadPodFromFile(filePath string) (*v1.Pod, error) { } pod := &v1.Pod{} - codec := legacyscheme.Codecs.LegacyCodec(legacyscheme.Registry.GroupOrDie(v1.GroupName).GroupVersion) + codec := legacyscheme.Codecs.UniversalDecoder() if err := runtime.DecodeInto(codec, podDef, pod); err != nil { return nil, fmt.Errorf("failed decoding file: %v", err) } @@ -270,3 +278,201 @@ func stringToSet(str, delimiter string) (sets.String, error) { } return zonesSet, nil } + +// BlockVolumePathHandler defines a set of operations for handling block volume-related operations +type BlockVolumePathHandler interface { + // MapDevice creates a symbolic link to block device under specified map path + MapDevice(devicePath string, mapPath string, linkName string) error + // UnmapDevice removes a symbolic link to block device under specified map path + UnmapDevice(mapPath string, linkName string) error + // RemovePath removes a file or directory on specified map path + RemoveMapPath(mapPath string) error + // IsSymlinkExist retruns true if specified symbolic link exists + IsSymlinkExist(mapPath string) (bool, error) + // GetDeviceSymlinkRefs searches symbolic links under global map path + GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) + // FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath + // corresponding to map path symlink, and then return global map path with pod uuid. + FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) + // AttachFileDevice takes a path to a regular file and makes it available as an + // attached block device. + AttachFileDevice(path string) (string, error) + // GetLoopDevice returns the full path to the loop device associated with the given path. + GetLoopDevice(path string) (string, error) + // RemoveLoopDevice removes specified loopback device + RemoveLoopDevice(device string) error +} + +// NewBlockVolumePathHandler returns a new instance of BlockVolumeHandler. 
+func NewBlockVolumePathHandler() BlockVolumePathHandler { + var volumePathHandler VolumePathHandler + return volumePathHandler +} + +// VolumePathHandler is path related operation handlers for block volume +type VolumePathHandler struct { +} + +// MapDevice creates a symbolic link to block device under specified map path +func (v VolumePathHandler) MapDevice(devicePath string, mapPath string, linkName string) error { + // Example of global map path: + // globalMapPath/linkName: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{podUid} + // linkName: {podUid} + // + // Example of pod device map path: + // podDeviceMapPath/linkName: pods/{podUid}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + // linkName: {volumeName} + if len(devicePath) == 0 { + return fmt.Errorf("Failed to map device to map path. devicePath is empty") + } + if len(mapPath) == 0 { + return fmt.Errorf("Failed to map device to map path. mapPath is empty") + } + if !filepath.IsAbs(mapPath) { + return fmt.Errorf("The map path should be absolute: map path: %s", mapPath) + } + glog.V(5).Infof("MapDevice: devicePath %s", devicePath) + glog.V(5).Infof("MapDevice: mapPath %s", mapPath) + glog.V(5).Infof("MapDevice: linkName %s", linkName) + + // Check and create mapPath + _, err := os.Stat(mapPath) + if err != nil && !os.IsNotExist(err) { + glog.Errorf("cannot validate map path: %s", mapPath) + return err + } + if err = os.MkdirAll(mapPath, 0750); err != nil { + return fmt.Errorf("Failed to mkdir %s, error %v", mapPath, err) + } + // Remove old symbolic link(or file) then create new one. + // This should be done because current symbolic link is + // stale accross node reboot. + linkPath := path.Join(mapPath, string(linkName)) + if err = os.Remove(linkPath); err != nil && !os.IsNotExist(err) { + return err + } + err = os.Symlink(devicePath, linkPath) + return err +} + +// UnmapDevice removes a symbolic link associated to block device under specified map path +func (v VolumePathHandler) UnmapDevice(mapPath string, linkName string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to unmap device from map path. mapPath is empty") + } + glog.V(5).Infof("UnmapDevice: mapPath %s", mapPath) + glog.V(5).Infof("UnmapDevice: linkName %s", linkName) + + // Check symbolic link exists + linkPath := path.Join(mapPath, string(linkName)) + if islinkExist, checkErr := v.IsSymlinkExist(linkPath); checkErr != nil { + return checkErr + } else if !islinkExist { + glog.Warningf("Warning: Unmap skipped because symlink does not exist on the path: %v", linkPath) + return nil + } + err := os.Remove(linkPath) + return err +} + +// RemoveMapPath removes a file or directory on specified map path +func (v VolumePathHandler) RemoveMapPath(mapPath string) error { + if len(mapPath) == 0 { + return fmt.Errorf("Failed to remove map path. mapPath is empty") + } + glog.V(5).Infof("RemoveMapPath: mapPath %s", mapPath) + err := os.RemoveAll(mapPath) + if err != nil && !os.IsNotExist(err) { + return err + } + return nil +} + +// IsSymlinkExist returns true if specified file exists and the type is symbolik link. +// If file doesn't exist, or file exists but not symbolick link, return false with no error. +// On other cases, return false with error from Lstat(). 
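`MapDevice` above publishes a block device by symlinking it under a map path, recreating the link because a stale one can survive a node reboot, and `IsSymlinkExist`/`UnmapDevice` undo that. A small runnable sketch of the same create-then-inspect flow using a temp directory (the device path is illustrative; a symlink target need not exist):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// isSymlink mirrors IsSymlinkExist: true only when the path exists and is a
// symbolic link; a missing path is not an error.
func isSymlink(p string) (bool, error) {
	fi, err := os.Lstat(p)
	if err == nil {
		return fi.Mode()&os.ModeSymlink == os.ModeSymlink, nil
	}
	if os.IsNotExist(err) {
		return false, nil
	}
	return false, err
}

// mapDevice mirrors the MapDevice flow: ensure the map path, drop any stale
// link, then symlink the device under linkName.
func mapDevice(devicePath, mapPath, linkName string) error {
	if err := os.MkdirAll(mapPath, 0750); err != nil {
		return err
	}
	linkPath := filepath.Join(mapPath, linkName)
	if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.Symlink(devicePath, linkPath)
}

func main() {
	mapPath, _ := ioutil.TempDir("", "volmap") // stand-in for the plugin map path
	defer os.RemoveAll(mapPath)

	if err := mapDevice("/dev/loop0", mapPath, "pod-uid"); err != nil {
		fmt.Println("map failed:", err)
		return
	}
	ok, err := isSymlink(filepath.Join(mapPath, "pod-uid"))
	fmt.Println("link present:", ok, "err:", err)
}
```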
+func (v VolumePathHandler) IsSymlinkExist(mapPath string) (bool, error) { + fi, err := os.Lstat(mapPath) + if err == nil { + // If file exits and it's symbolick link, return true and no error + if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + return true, nil + } + // If file exits but it's not symbolick link, return fale and no error + return false, nil + } + // If file doesn't exist, return false and no error + if os.IsNotExist(err) { + return false, nil + } + // Return error from Lstat() + return false, err +} + +// GetDeviceSymlinkRefs searches symbolic links under global map path +func (v VolumePathHandler) GetDeviceSymlinkRefs(devPath string, mapPath string) ([]string, error) { + var refs []string + files, err := ioutil.ReadDir(mapPath) + if err != nil { + return nil, fmt.Errorf("Directory cannot read %v", err) + } + for _, file := range files { + if file.Mode()&os.ModeSymlink != os.ModeSymlink { + continue + } + filename := file.Name() + filepath, err := os.Readlink(path.Join(mapPath, filename)) + if err != nil { + return nil, fmt.Errorf("Symbolic link cannot be retrieved %v", err) + } + glog.V(5).Infof("GetDeviceSymlinkRefs: filepath: %v, devPath: %v", filepath, devPath) + if filepath == devPath { + refs = append(refs, path.Join(mapPath, filename)) + } + } + glog.V(5).Infof("GetDeviceSymlinkRefs: refs %v", refs) + return refs, nil +} + +// FindGlobalMapPathUUIDFromPod finds {pod uuid} symbolic link under globalMapPath +// corresponding to map path symlink, and then return global map path with pod uuid. +// ex. mapPath symlink: pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} -> /dev/sdX +// globalMapPath/{pod uuid}: plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} -> /dev/sdX +func (v VolumePathHandler) FindGlobalMapPathUUIDFromPod(pluginDir, mapPath string, podUID types.UID) (string, error) { + var globalMapPathUUID string + // Find symbolic link named pod uuid under plugin dir + err := filepath.Walk(pluginDir, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if (fi.Mode()&os.ModeSymlink == os.ModeSymlink) && (fi.Name() == string(podUID)) { + glog.V(5).Infof("FindGlobalMapPathFromPod: path %s, mapPath %s", path, mapPath) + if res, err := compareSymlinks(path, mapPath); err == nil && res { + globalMapPathUUID = path + } + } + return nil + }) + if err != nil { + return "", err + } + glog.V(5).Infof("FindGlobalMapPathFromPod: globalMapPathUUID %s", globalMapPathUUID) + // Return path contains global map path + {pod uuid} + return globalMapPathUUID, nil +} + +func compareSymlinks(global, pod string) (bool, error) { + devGlobal, err := os.Readlink(global) + if err != nil { + return false, err + } + devPod, err := os.Readlink(pod) + if err != nil { + return false, err + } + glog.V(5).Infof("CompareSymlinks: devGloBal %s, devPod %s", devGlobal, devPod) + if devGlobal == devPod { + return true, nil + } + return false, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go new file mode 100644 index 00000000..755a10f0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util_linux.go @@ -0,0 +1,106 @@ +// +build linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "errors" + "fmt" + "os" + "os/exec" + "strings" + + "github.com/golang/glog" +) + +// AttachFileDevice takes a path to a regular file and makes it available as an +// attached block device. +func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { + blockDevicePath, err := v.GetLoopDevice(path) + if err != nil && err.Error() != ErrDeviceNotFound { + return "", err + } + + // If no existing loop device for the path, create one + if blockDevicePath == "" { + glog.V(4).Infof("Creating device for path: %s", path) + blockDevicePath, err = makeLoopDevice(path) + if err != nil { + return "", err + } + } + return blockDevicePath, nil +} + +// GetLoopDevice returns the full path to the loop device associated with the given path. +func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { + _, err := os.Stat(path) + if os.IsNotExist(err) { + return "", errors.New(ErrNotAvailable) + } + if err != nil { + return "", fmt.Errorf("not attachable: %v", err) + } + + args := []string{"-j", path} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + glog.V(2).Infof("Failed device discover command for path %s: %v", path, err) + return "", err + } + return parseLosetupOutputForDevice(out) +} + +func makeLoopDevice(path string) (string, error) { + args := []string{"-f", "--show", path} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + glog.V(2).Infof("Failed device create command for path %s: %v", path, err) + return "", err + } + return parseLosetupOutputForDevice(out) +} + +// RemoveLoopDevice removes specified loopback device +func (v VolumePathHandler) RemoveLoopDevice(device string) error { + args := []string{"-d", device} + cmd := exec.Command(losetupPath, args...) + out, err := cmd.CombinedOutput() + if err != nil { + if !strings.Contains(string(out), "No such device or address") { + return err + } + } + return nil +} + +func parseLosetupOutputForDevice(output []byte) (string, error) { + if len(output) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + + // losetup returns device in the format: + // /dev/loop1: [0073]:148662 (/dev/sda) + device := strings.TrimSpace(strings.SplitN(string(output), ":", 2)[0]) + if len(device) == 0 { + return "", errors.New(ErrDeviceNotFound) + } + return device, nil +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go b/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go new file mode 100644 index 00000000..930e4f66 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/volume/util/util_unsupported.go @@ -0,0 +1,39 @@ +// +build !linux + +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
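For reference, `losetup -j` output has the one-line form quoted in parseLosetupOutputForDevice above. The standalone sketch below re-implements just that parsing step so it can be tried without creating a loop device; the sample string is the one from the code comment, and the function name is invented for the example.

```go
package main

import (
	"errors"
	"fmt"
	"strings"
)

// parseLoopDevice mirrors parseLosetupOutputForDevice above: `losetup -j <file>`
// prints lines such as
//   /dev/loop1: [0073]:148662 (/dev/sda)
// and the device path is everything before the first colon.
func parseLoopDevice(output string) (string, error) {
	if len(output) == 0 {
		return "", errors.New("device not found")
	}
	device := strings.TrimSpace(strings.SplitN(output, ":", 2)[0])
	if len(device) == 0 {
		return "", errors.New("device not found")
	}
	return device, nil
}

func main() {
	dev, err := parseLoopDevice("/dev/loop1: [0073]:148662 (/dev/sda)\n")
	fmt.Println(dev, err) // prints "/dev/loop1 <nil>"
}
```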
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package util + +import ( + "fmt" +) + +// AttachFileDevice takes a path to a regular file and makes it available as an +// attached block device. +func (v VolumePathHandler) AttachFileDevice(path string) (string, error) { + return "", fmt.Errorf("AttachFileDevice not supported for this build.") +} + +// GetLoopDevice returns the full path to the loop device associated with the given path. +func (v VolumePathHandler) GetLoopDevice(path string) (string, error) { + return "", fmt.Errorf("GetLoopDevice not supported for this build.") +} + +// RemoveLoopDevice removes specified loopback device +func (v VolumePathHandler) RemoveLoopDevice(device string) error { + return fmt.Errorf("RemoveLoopDevice not supported for this build.") +} diff --git a/vendor/k8s.io/kubernetes/pkg/volume/volume.go b/vendor/k8s.io/kubernetes/pkg/volume/volume.go index 78cb21a3..47196355 100644 --- a/vendor/k8s.io/kubernetes/pkg/volume/volume.go +++ b/vendor/k8s.io/kubernetes/pkg/volume/volume.go @@ -37,6 +37,19 @@ type Volume interface { MetricsProvider } +// BlockVolume interface provides methods to generate global map path +// and pod device map path. +type BlockVolume interface { + // GetGlobalMapPath returns a global map path which contains + // symbolic links associated to a block device. + // ex. plugins/kubernetes.io/{PluginName}/{DefaultKubeletVolumeDevicesDirName}/{volumePluginDependentPath}/{pod uuid} + GetGlobalMapPath(spec *Spec) (string, error) + // GetPodDeviceMapPath returns a pod device map path + // and name of a symbolic link associated to a block device. + // ex. pods/{podUid}}/{DefaultKubeletVolumeDevicesDirName}/{escapeQualifiedPluginName}/{volumeName} + GetPodDeviceMapPath() (string, string) +} + // MetricsProvider exposes metrics (e.g. used,available space) related to a // Volume. type MetricsProvider interface { @@ -132,6 +145,34 @@ type Unmounter interface { TearDownAt(dir string) error } +// BlockVolumeMapper interface provides methods to set up/map the volume. +type BlockVolumeMapper interface { + BlockVolume + // SetUpDevice prepares the volume to a self-determined directory path, + // which may or may not exist yet and returns combination of physical + // device path of a block volume and error. + // If the plugin is non-attachable, it should prepare the device + // in /dev/ (or where appropriate) and return unique device path. + // Unique device path across kubelet node reboot is required to avoid + // unexpected block volume destruction. + // If the plugin is attachable, it should not do anything here, + // just return empty string for device path. + // Instead, attachable plugin have to return unique device path + // at attacher.Attach() and attacher.WaitForAttach(). + // This may be called more than once, so implementations must be idempotent. + SetUpDevice() (string, error) +} + +// BlockVolumeUnmapper interface provides methods to cleanup/unmap the volumes. +type BlockVolumeUnmapper interface { + BlockVolume + // TearDownDevice removes traces of the SetUpDevice procedure under + // a self-determined directory. 
+ // If the plugin is non-attachable, this method detaches the volume + // from a node. + TearDownDevice(mapPath string, devicePath string) error +} + // Provisioner is an interface that creates templates for PersistentVolumes // and can create the volume as a new resource in the infrastructure provider. type Provisioner interface { @@ -195,8 +236,10 @@ type BulkVolumeVerifier interface { // Detacher can detach a volume from a node. type Detacher interface { - // Detach the given device from the node with the given Name. - Detach(deviceName string, nodeName types.NodeName) error + // Detach the given volume from the node with the given Name. + // volumeName is name of the volume as returned from plugin's + // GetVolumeName(). + Detach(volumeName string, nodeName types.NodeName) error // UnmountDevice unmounts the global mount of the disk. This // should only be called once all bind mounts have been From d91df686387f3b155bbe58c1baa7ce9fb1ccef43 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Thu, 14 Dec 2017 00:09:36 +0100 Subject: [PATCH 04/59] CI: split critest from e2e Signed-off-by: Antonio Murdaca --- contrib/test/integration/main.yml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/contrib/test/integration/main.yml b/contrib/test/integration/main.yml index 722290aa..e5c22c03 100644 --- a/contrib/test/integration/main.yml +++ b/contrib/test/integration/main.yml @@ -41,6 +41,7 @@ tags: - integration - e2e + - critest tasks: - name: clone build and install cri-o include: "build/cri-o.yml" @@ -65,7 +66,7 @@ vars_files: - "{{ playbook_dir }}/vars.yml" tags: - - e2e + - critest tasks: - name: install Golang tools include: golang.yml @@ -78,6 +79,18 @@ cri_tools_git_version: "a9e38a4a000bc1a4052fb33de1c967b8cfe9ad40" - name: run critest validation and benchmarks include: critest.yml + +- hosts: all + remote_user: root + vars_files: + - "{{ playbook_dir }}/vars.yml" + tags: + - e2e + tasks: + - name: install Golang tools + include: golang.yml + vars: + version: "1.9.2" - name: clone build and install kubernetes include: "build/kubernetes.yml" vars: From 0651d3a8defec8b89efe33f6040448d16fa67baf Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Fri, 3 Nov 2017 13:36:13 -0400 Subject: [PATCH 05/59] Update containers/image and containers/storage Bump containers/image to 3d0304a02154dddc8f97cc833aa0861cea5e9ade, and containers/storage to 0d32dfce498e06c132c60dac945081bf44c22464. 
Signed-off-by: Nalin Dahyabhai --- vendor.conf | 4 +- .../github.com/containers/image/copy/copy.go | 279 +++-- .../containers/image/copy/manifest.go | 29 +- .../github.com/containers/image/copy/sign.go | 14 +- .../image/directory/directory_dest.go | 104 +- .../image/directory/directory_src.go | 25 +- .../image/directory/directory_transport.go | 20 +- .../containers/image/docker/archive/src.go | 5 + .../image/docker/archive/transport.go | 9 +- .../containers/image/docker/daemon/client.go | 69 ++ .../image/docker/daemon/daemon_dest.go | 29 +- .../image/docker/daemon/daemon_src.go | 13 +- .../image/docker/daemon/daemon_transport.go | 11 +- .../containers/image/docker/docker_client.go | 64 -- .../containers/image/docker/docker_image.go | 10 +- .../image/docker/docker_image_dest.go | 8 +- .../image/docker/docker_image_src.go | 48 +- .../image/docker/docker_transport.go | 7 +- .../containers/image/docker/tarfile/dest.go | 11 +- .../containers/image/docker/tarfile/src.go | 46 +- .../containers/image/docker/tarfile/types.go | 38 +- .../containers/image/image/docker_list.go | 56 +- .../containers/image/image/docker_schema1.go | 292 ++---- .../containers/image/image/docker_schema2.go | 149 ++- .../containers/image/image/manifest.go | 82 +- .../containers/image/image/memory.go | 13 +- .../github.com/containers/image/image/oci.go | 113 +- .../containers/image/image/sourced.go | 50 +- .../containers/image/image/unparsed.go | 69 +- .../image/internal/tmpdir/tmpdir.go | 19 + .../image/manifest/docker_schema1.go | 310 ++++++ .../image/manifest/docker_schema2.go | 251 +++++ .../containers/image/manifest/manifest.go | 96 +- .../containers/image/manifest/oci.go | 120 +++ .../containers/image/oci/archive/oci_src.go | 27 +- .../image/oci/archive/oci_transport.go | 73 +- .../containers/image/oci/internal/oci_util.go | 126 +++ .../containers/image/oci/layout/oci_dest.go | 117 ++- .../containers/image/oci/layout/oci_src.go | 69 +- .../image/oci/layout/oci_transport.go | 100 +- .../containers/image/openshift/openshift.go | 37 +- .../image/openshift/openshift_transport.go | 9 +- .../containers/image/ostree/ostree_dest.go | 149 ++- .../containers/image/ostree/ostree_src.go | 354 +++++++ .../image/ostree/ostree_transport.go | 41 +- .../image/signature/policy_config.go | 6 +- .../containers/image/storage/storage_image.go | 970 +++++++++++------- .../image/storage/storage_reference.go | 76 +- .../image/storage/storage_transport.go | 209 +++- .../containers/image/tarball/doc.go | 48 + .../image/tarball/tarball_reference.go | 93 ++ .../containers/image/tarball/tarball_src.go | 260 +++++ .../image/tarball/tarball_transport.go | 66 ++ .../transports/alltransports/alltransports.go | 3 +- .../image/transports/alltransports/storage.go | 8 + .../transports/alltransports/storage_stub.go | 9 + .../containers/image/types/types.go | 90 +- .../github.com/containers/image/vendor.conf | 7 +- .../storage/drivers/overlay/overlay.go | 127 ++- .../containers/storage/drivers/vfs/driver.go | 5 + .../github.com/containers/storage/images.go | 103 +- .../containers/storage/images_ffjson.go | 60 +- vendor/github.com/containers/storage/store.go | 50 +- .../github.com/containers/storage/vendor.conf | 2 +- 64 files changed, 4121 insertions(+), 1636 deletions(-) create mode 100644 vendor/github.com/containers/image/docker/daemon/client.go create mode 100644 vendor/github.com/containers/image/internal/tmpdir/tmpdir.go create mode 100644 vendor/github.com/containers/image/manifest/docker_schema1.go create mode 100644 
vendor/github.com/containers/image/manifest/docker_schema2.go create mode 100644 vendor/github.com/containers/image/manifest/oci.go create mode 100644 vendor/github.com/containers/image/oci/internal/oci_util.go create mode 100644 vendor/github.com/containers/image/ostree/ostree_src.go create mode 100644 vendor/github.com/containers/image/tarball/doc.go create mode 100644 vendor/github.com/containers/image/tarball/tarball_reference.go create mode 100644 vendor/github.com/containers/image/tarball/tarball_src.go create mode 100644 vendor/github.com/containers/image/tarball/tarball_transport.go create mode 100644 vendor/github.com/containers/image/transports/alltransports/storage.go create mode 100644 vendor/github.com/containers/image/transports/alltransports/storage_stub.go diff --git a/vendor.conf b/vendor.conf index 5c837b12..6e8c533a 100644 --- a/vendor.conf +++ b/vendor.conf @@ -12,10 +12,10 @@ github.com/gregjones/httpcache 787624de3eb7bd915c329cba748687a3b22666a6 github.com/json-iterator/go 1.0.0 github.com/peterbourgon/diskv v2.0.1 github.com/sirupsen/logrus v1.0.0 -github.com/containers/image 57b257d128d6075ea3287991ee408d24c7bd2758 +github.com/containers/image 3d0304a02154dddc8f97cc833aa0861cea5e9ade github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/ostreedev/ostree-go master -github.com/containers/storage d7921c6facc516358070a1306689eda18adaa20a +github.com/containers/storage 0d32dfce498e06c132c60dac945081bf44c22464 github.com/containernetworking/cni v0.4.0 google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go index 590b3787..29065e03 100644 --- a/vendor/github.com/containers/image/copy/copy.go +++ b/vendor/github.com/containers/image/copy/copy.go @@ -12,8 +12,6 @@ import ( "strings" "time" - pb "gopkg.in/cheggaaa/pb.v1" - "github.com/containers/image/image" "github.com/containers/image/pkg/compression" "github.com/containers/image/signature" @@ -22,6 +20,7 @@ import ( "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" + pb "gopkg.in/cheggaaa/pb.v1" ) type digestingReader struct { @@ -31,23 +30,6 @@ type digestingReader struct { validationFailed bool } -// imageCopier allows us to keep track of diffID values for blobs, and other -// data, that we're copying between images, and cache other information that -// might allow us to take some shortcuts -type imageCopier struct { - copiedBlobs map[digest.Digest]digest.Digest - cachedDiffIDs map[digest.Digest]digest.Digest - manifestUpdates *types.ManifestUpdateOptions - dest types.ImageDestination - src types.Image - rawSource types.ImageSource - diffIDsAreNeeded bool - canModifyManifest bool - reportWriter io.Writer - progressInterval time.Duration - progress chan types.ProgressProperties -} - // newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error // and set validationFailed to true if the source stream does not match expectedDigest. func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) { @@ -86,6 +68,27 @@ func (d *digestingReader) Read(p []byte) (int, error) { return n, err } +// copier allows us to keep track of diffID values for blobs, and other +// data shared across one or more images in a possible manifest list. 
+type copier struct { + copiedBlobs map[digest.Digest]digest.Digest + cachedDiffIDs map[digest.Digest]digest.Digest + dest types.ImageDestination + rawSource types.ImageSource + reportWriter io.Writer + progressInterval time.Duration + progress chan types.ProgressProperties +} + +// imageCopier tracks state specific to a single image (possibly an item of a manifest list) +type imageCopier struct { + c *copier + manifestUpdates *types.ManifestUpdateOptions + src types.Image + diffIDsAreNeeded bool + canModifyManifest bool +} + // Options allows supplying non-default configuration modifying the behavior of CopyImage. type Options struct { RemoveSignatures bool // Remove any pre-existing signatures. SignBy will still add a new signature. @@ -95,6 +98,8 @@ type Options struct { DestinationCtx *types.SystemContext ProgressInterval time.Duration // time to wait between reports to signal the progress channel Progress chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset. + // manifest MIME type of image set by user. "" is default and means use the autodetection to the the manifest MIME type + ForceManifestMIMEType string } // Image copies image from srcRef to destRef, using policyContext to validate @@ -115,10 +120,6 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe reportWriter = options.ReportWriter } - writeReport := func(f string, a ...interface{}) { - fmt.Fprintf(reportWriter, f, a...) - } - dest, err := destRef.NewImageDestination(options.DestinationCtx) if err != nil { return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef)) @@ -133,43 +134,89 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe if err != nil { return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef)) } - unparsedImage := image.UnparsedFromSource(rawSource) defer func() { - if unparsedImage != nil { - if err := unparsedImage.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (unparsed: %v)", err) - } + if err := rawSource.Close(); err != nil { + retErr = errors.Wrapf(retErr, " (src: %v)", err) } }() + c := &copier{ + copiedBlobs: make(map[digest.Digest]digest.Digest), + cachedDiffIDs: make(map[digest.Digest]digest.Digest), + dest: dest, + rawSource: rawSource, + reportWriter: reportWriter, + progressInterval: options.ProgressInterval, + progress: options.Progress, + } + + unparsedToplevel := image.UnparsedInstance(rawSource, nil) + multiImage, err := isMultiImage(unparsedToplevel) + if err != nil { + return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(srcRef)) + } + + if !multiImage { + // The simple case: Just copy a single image. + if err := c.copyOneImage(policyContext, options, unparsedToplevel); err != nil { + return err + } + } else { + // This is a manifest list. Choose a single image and copy it. + // FIXME: Copy to destinations which support manifest lists, one image at a time. 
+ instanceDigest, err := image.ChooseManifestInstanceFromManifestList(options.SourceCtx, unparsedToplevel) + if err != nil { + return errors.Wrapf(err, "Error choosing an image from manifest list %s", transports.ImageName(srcRef)) + } + logrus.Debugf("Source is a manifest list; copying (only) instance %s", instanceDigest) + unparsedInstance := image.UnparsedInstance(rawSource, &instanceDigest) + + if err := c.copyOneImage(policyContext, options, unparsedInstance); err != nil { + return err + } + } + + if err := c.dest.Commit(); err != nil { + return errors.Wrap(err, "Error committing the finished image") + } + + return nil +} + +// Image copies a single (on-manifest-list) image unparsedImage, using policyContext to validate +// source image admissibility. +func (c *copier) copyOneImage(policyContext *signature.PolicyContext, options *Options, unparsedImage *image.UnparsedImage) (retErr error) { + // The caller is handling manifest lists; this could happen only if a manifest list contains a manifest list. + // Make sure we fail cleanly in such cases. + multiImage, err := isMultiImage(unparsedImage) + if err != nil { + // FIXME FIXME: How to name a reference for the sub-image? + return errors.Wrapf(err, "Error determining manifest MIME type for %s", transports.ImageName(unparsedImage.Reference())) + } + if multiImage { + return fmt.Errorf("Unexpectedly received a manifest list instead of a manifest for a single image") + } + // Please keep this policy check BEFORE reading any other information about the image. + // (the multiImage check above only matches the MIME type, which we have received anyway. + // Actual parsing of anything should be deferred.) if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so. 
return errors.Wrap(err, "Source image rejected") } - src, err := image.FromUnparsedImage(unparsedImage) + src, err := image.FromUnparsedImage(options.SourceCtx, unparsedImage) if err != nil { - return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef)) + return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(c.rawSource.Reference())) } - unparsedImage = nil - defer func() { - if err := src.Close(); err != nil { - retErr = errors.Wrapf(retErr, " (source: %v)", err) - } - }() - if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil { + if err := checkImageDestinationForCurrentRuntimeOS(options.DestinationCtx, src, c.dest); err != nil { return err } - if src.IsMultiImage() { - return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef)) - } - var sigs [][]byte if options.RemoveSignatures { sigs = [][]byte{} } else { - writeReport("Getting image source signatures\n") + c.Printf("Getting image source signatures\n") s, err := src.Signatures(context.TODO()) if err != nil { return errors.Wrap(err, "Error reading signatures") @@ -177,41 +224,33 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe sigs = s } if len(sigs) != 0 { - writeReport("Checking if image destination supports signatures\n") - if err := dest.SupportsSignatures(); err != nil { + c.Printf("Checking if image destination supports signatures\n") + if err := c.dest.SupportsSignatures(); err != nil { return errors.Wrap(err, "Can not copy signatures") } } - canModifyManifest := len(sigs) == 0 - manifestUpdates := types.ManifestUpdateOptions{} - manifestUpdates.InformationOnly.Destination = dest + ic := imageCopier{ + c: c, + manifestUpdates: &types.ManifestUpdateOptions{InformationOnly: types.ManifestUpdateInformation{Destination: c.dest}}, + src: src, + // diffIDsAreNeeded is computed later + canModifyManifest: len(sigs) == 0, + } - if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil { + if err := ic.updateEmbeddedDockerReference(); err != nil { return err } // We compute preferredManifestMIMEType only to show it in error messages. // Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed. - preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, dest.SupportedManifestMIMETypes(), canModifyManifest) + preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := ic.determineManifestConversion(c.dest.SupportedManifestMIMETypes(), options.ForceManifestMIMEType) if err != nil { return err } - // If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here. - ic := imageCopier{ - copiedBlobs: make(map[digest.Digest]digest.Digest), - cachedDiffIDs: make(map[digest.Digest]digest.Digest), - manifestUpdates: &manifestUpdates, - dest: dest, - src: src, - rawSource: rawSource, - diffIDsAreNeeded: src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates), - canModifyManifest: canModifyManifest, - reportWriter: reportWriter, - progressInterval: options.ProgressInterval, - progress: options.Progress, - } + // If src.UpdatedImageNeedsLayerDiffIDs(ic.manifestUpdates) will be true, it needs to be true by the time we get here. 
+ ic.diffIDsAreNeeded = src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) if err := ic.copyLayers(); err != nil { return err @@ -233,9 +272,9 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } // If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType. // So if we are here, we will definitely be trying to convert the manifest. - // With !canModifyManifest, that would just be a string of repeated failures for the same reason, + // With !ic.canModifyManifest, that would just be a string of repeated failures for the same reason, // so let’s bail out early and with a better error message. - if !canModifyManifest { + if !ic.canModifyManifest { return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)") } @@ -243,7 +282,7 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)} for _, manifestMIMEType := range otherManifestMIMETypeCandidates { logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType) - manifestUpdates.ManifestMIMEType = manifestMIMEType + ic.manifestUpdates.ManifestMIMEType = manifestMIMEType attemptedManifest, err := ic.copyUpdatedConfigAndManifest() if err != nil { logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err) @@ -262,35 +301,44 @@ func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageRe } if options.SignBy != "" { - newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter) + newSig, err := c.createSignature(manifest, options.SignBy) if err != nil { return err } sigs = append(sigs, newSig) } - writeReport("Storing signatures\n") - if err := dest.PutSignatures(sigs); err != nil { + c.Printf("Storing signatures\n") + if err := c.dest.PutSignatures(sigs); err != nil { return errors.Wrap(err, "Error writing signatures") } - if err := dest.Commit(); err != nil { - return errors.Wrap(err, "Error committing the finished image") - } - return nil } -func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error { +// Printf writes a formatted string to c.reportWriter. +// Note that the method name Printf is not entirely arbitrary: (go tool vet) +// has a built-in list of functions/methods (whatever object they are for) +// which have their format strings checked; for other names we would have +// to pass a parameter to every (go tool vet) invocation. +func (c *copier) Printf(format string, a ...interface{}) { + fmt.Fprintf(c.reportWriter, format, a...) 
+} + +func checkImageDestinationForCurrentRuntimeOS(ctx *types.SystemContext, src types.Image, dest types.ImageDestination) error { if dest.MustMatchRuntimeOS() { + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } c, err := src.OCIConfig() if err != nil { return errors.Wrapf(err, "Error parsing image configuration") } - osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS) - if runtime.GOOS == "windows" && c.OS == "linux" { + osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, wantedOS) + if wantedOS == "windows" && c.OS == "linux" { return osErr - } else if runtime.GOOS != "windows" && c.OS == "windows" { + } else if wantedOS != "windows" && c.OS == "windows" { return osErr } } @@ -298,35 +346,44 @@ func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageD } // updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests. -func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error { - destRef := dest.Reference().DockerReference() +func (ic *imageCopier) updateEmbeddedDockerReference() error { + destRef := ic.c.dest.Reference().DockerReference() if destRef == nil { return nil // Destination does not care about Docker references } - if !src.EmbeddedDockerReferenceConflicts(destRef) { + if !ic.src.EmbeddedDockerReferenceConflicts(destRef) { return nil // No reference embedded in the manifest, or it matches destRef already. } - if !canModifyManifest { + if !ic.canModifyManifest { return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway", - transports.ImageName(dest.Reference()), destRef.String()) + transports.ImageName(ic.c.dest.Reference()), destRef.String()) } - manifestUpdates.EmbeddedDockerReference = destRef + ic.manifestUpdates.EmbeddedDockerReference = destRef return nil } -// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. +// copyLayers copies layers from ic.src/ic.c.rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest. func (ic *imageCopier) copyLayers() error { srcInfos := ic.src.LayerInfos() destInfos := []types.BlobInfo{} diffIDs := []digest.Digest{} + updatedSrcInfos := ic.src.LayerInfosForCopy() + srcInfosUpdated := false + if updatedSrcInfos != nil && !reflect.DeepEqual(srcInfos, updatedSrcInfos) { + if !ic.canModifyManifest { + return errors.Errorf("Internal error: copyLayers() needs to use an updated manifest but that was known to be forbidden") + } + srcInfos = updatedSrcInfos + srcInfosUpdated = true + } for _, srcLayer := range srcInfos { var ( destInfo types.BlobInfo diffID digest.Digest err error ) - if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { + if ic.c.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 { // DiffIDs are, currently, needed only when converting from schema1. // In which case src.LayerInfos will not have URLs because schema1 // does not support them. 
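Taken together, the restructuring above changes how callers of copy.Image behave when the source is a manifest list, and it adds the ForceManifestMIMEType option. The following is a rough usage sketch, not part of the patch: the image references, the permissive policy, and the target MIME type are placeholder choices.

```go
package main

import (
	"fmt"
	"os"

	"github.com/containers/image/copy"
	"github.com/containers/image/manifest"
	"github.com/containers/image/signature"
	"github.com/containers/image/transports/alltransports"
)

func main() {
	// Placeholder references; any transport understood by alltransports works.
	srcRef, err := alltransports.ParseImageName("docker://docker.io/library/busybox:latest")
	if err != nil {
		panic(err)
	}
	destRef, err := alltransports.ParseImageName("dir:/tmp/busybox-copy")
	if err != nil {
		panic(err)
	}

	// An insecureAcceptAnything policy keeps the example short; real callers
	// should load the configured system policy instead.
	policy := &signature.Policy{Default: signature.PolicyRequirements{signature.NewPRInsecureAcceptAnything()}}
	policyCtx, err := signature.NewPolicyContext(policy)
	if err != nil {
		panic(err)
	}
	defer policyCtx.Destroy()

	// If the source turns out to be a manifest list, copy.Image now copies a
	// single matching instance; ForceManifestMIMEType pins the format written
	// to the destination.
	err = copy.Image(policyCtx, destRef, srcRef, &copy.Options{
		ReportWriter:          os.Stdout,
		ForceManifestMIMEType: manifest.DockerV2Schema2MediaType,
	})
	fmt.Println("copy finished, err =", err)
}
```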
@@ -334,7 +391,7 @@ func (ic *imageCopier) copyLayers() error { return errors.New("getting DiffID for foreign layers is unimplemented") } destInfo = srcLayer - fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name()) + ic.c.Printf("Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.c.dest.Reference().Transport().Name()) } else { destInfo, diffID, err = ic.copyLayer(srcLayer) if err != nil { @@ -348,7 +405,7 @@ func (ic *imageCopier) copyLayers() error { if ic.diffIDsAreNeeded { ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs } - if layerDigestsDiffer(srcInfos, destInfos) { + if srcInfosUpdated || layerDigestsDiffer(srcInfos, destInfos) { ic.manifestUpdates.LayerInfos = destInfos } return nil @@ -379,7 +436,7 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { // We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion. // So, this can only happen if we are trying to upload using one of the other MIME type candidates. // Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise - // when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. + // when ic.c.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2. // Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now. // If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates. return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType) @@ -395,27 +452,27 @@ func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) { return nil, errors.Wrap(err, "Error reading manifest") } - if err := ic.copyConfig(pendingImage); err != nil { + if err := ic.c.copyConfig(pendingImage); err != nil { return nil, err } - fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n") - if err := ic.dest.PutManifest(manifest); err != nil { + ic.c.Printf("Writing manifest to image destination\n") + if err := ic.c.dest.PutManifest(manifest); err != nil { return nil, errors.Wrap(err, "Error writing manifest") } return manifest, nil } // copyConfig copies config.json, if any, from src to dest. 
-func (ic *imageCopier) copyConfig(src types.Image) error { +func (c *copier) copyConfig(src types.Image) error { srcInfo := src.ConfigInfo() if srcInfo.Digest != "" { - fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest) + c.Printf("Copying config %s\n", srcInfo.Digest) configBlob, err := src.ConfigBlob() if err != nil { return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest) } - destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) + destInfo, err := c.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false) if err != nil { return err } @@ -437,12 +494,12 @@ type diffIDResult struct { // and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) { // Check if we already have a blob with this digest - haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo) + haveBlob, extantBlobSize, err := ic.c.dest.HasBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest) } // If we already have a cached diffID for this blob, we don't need to compute it - diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "") + diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.c.cachedDiffIDs[srcInfo.Digest] == "") // If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again if haveBlob && !diffIDIsNeeded { // Check the blob sizes match, if we were given a size this time @@ -451,17 +508,17 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest } srcInfo.Size = extantBlobSize // Tell the image destination that this blob's delta is being applied again. 
For some image destinations, this can be faster than using GetBlob/PutBlob - blobinfo, err := ic.dest.ReapplyBlob(srcInfo) + blobinfo, err := ic.c.dest.ReapplyBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest) } - fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest) - return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err + ic.c.Printf("Skipping fetch of repeat blob %s\n", srcInfo.Digest) + return blobinfo, ic.c.cachedDiffIDs[srcInfo.Digest], err } // Fallback: copy the layer, computing the diffID if we need to do so - fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest) - srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo) + ic.c.Printf("Copying blob %s\n", srcInfo.Digest) + srcStream, srcBlobSize, err := ic.c.rawSource.GetBlob(srcInfo) if err != nil { return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest) } @@ -479,7 +536,7 @@ func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID") } logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest) - ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest + ic.c.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest } return blobInfo, diffIDResult.digest, nil } @@ -513,7 +570,7 @@ func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.Bl return pipeWriter } } - blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success + blobInfo, err := ic.c.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success return blobInfo, diffIDChan, err // We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan } @@ -547,7 +604,7 @@ func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) // perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil, // perhaps compressing it if canCompress, // and returns a complete blobInfo of the copied blob. -func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, +func (c *copier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo, getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer, canCompress bool) (types.BlobInfo, error) { // The copying happens through a pipeline of connected io.Readers. @@ -575,7 +632,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo // === Report progress using a pb.Reader. 
bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES) - bar.Output = ic.reportWriter + bar.Output = c.reportWriter bar.SetMaxWidth(80) bar.ShowTimeLeft = false bar.ShowPercent = false @@ -592,7 +649,7 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo // === Compress the layer if it is uncompressed and compression is desired var inputInfo types.BlobInfo - if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() { + if !canCompress || isCompressed || !c.dest.ShouldCompressLayers() { logrus.Debugf("Using original blob without modification") inputInfo = srcInfo } else { @@ -609,19 +666,19 @@ func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.Blo inputInfo.Size = -1 } - // === Report progress using the ic.progress channel, if required. - if ic.progress != nil && ic.progressInterval > 0 { + // === Report progress using the c.progress channel, if required. + if c.progress != nil && c.progressInterval > 0 { destStream = &progressReader{ source: destStream, - channel: ic.progress, - interval: ic.progressInterval, + channel: c.progress, + interval: c.progressInterval, artifact: srcInfo, lastTime: time.Now(), } } // === Finally, send the layer stream to dest. - uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo) + uploadedInfo, err := c.dest.PutBlob(destStream, inputInfo) if err != nil { return types.BlobInfo{}, errors.Wrap(err, "Error writing blob") } diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go index e3b294dd..7e4cd10e 100644 --- a/vendor/github.com/containers/image/copy/manifest.go +++ b/vendor/github.com/containers/image/copy/manifest.go @@ -37,16 +37,20 @@ func (os *orderedSet) append(s string) { } } -// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest. -// Note that the conversion will only happen later, through src.UpdatedImage +// determineManifestConversion updates ic.manifestUpdates to convert manifest to a supported MIME type, if necessary and ic.canModifyManifest. +// Note that the conversion will only happen later, through ic.src.UpdatedImage // Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified), // and a list of other possible alternatives, in order. -func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) { - _, srcType, err := src.Manifest() +func (ic *imageCopier) determineManifestConversion(destSupportedManifestMIMETypes []string, forceManifestMIMEType string) (string, []string, error) { + _, srcType, err := ic.src.Manifest() if err != nil { // This should have been cached?! return "", nil, errors.Wrap(err, "Error reading manifest") } + if forceManifestMIMEType != "" { + destSupportedManifestMIMETypes = []string{forceManifestMIMEType} + } + if len(destSupportedManifestMIMETypes) == 0 { return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions. 
} @@ -67,10 +71,10 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s if _, ok := supportedByDest[srcType]; ok { prioritizedTypes.append(srcType) } - if !canModifyManifest { - // We could also drop the !canModifyManifest parameter and have the caller + if !ic.canModifyManifest { + // We could also drop the !ic.canModifyManifest check and have the caller // make the choice; it is already doing that to an extent, to improve error - // messages. But it is nice to hide the “if !canModifyManifest, do no conversion” + // messages. But it is nice to hide the “if !ic.canModifyManifest, do no conversion” // special case in here; the caller can then worry (or not) only about a good UI. logrus.Debugf("We can't modify the manifest, hoping for the best...") return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying? @@ -94,9 +98,18 @@ func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, s } preferredType := prioritizedTypes.list[0] if preferredType != srcType { - manifestUpdates.ManifestMIMEType = preferredType + ic.manifestUpdates.ManifestMIMEType = preferredType } else { logrus.Debugf("... will first try using the original manifest unmodified") } return preferredType, prioritizedTypes.list[1:], nil } + +// isMultiImage returns true if img is a list of images +func isMultiImage(img types.UnparsedImage) (bool, error) { + _, mt, err := img.Manifest() + if err != nil { + return false, err + } + return manifest.MIMETypeIsMultiImage(mt), nil +} diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go index 9187d70b..91394d2b 100644 --- a/vendor/github.com/containers/image/copy/sign.go +++ b/vendor/github.com/containers/image/copy/sign.go @@ -1,17 +1,13 @@ package copy import ( - "fmt" - "io" - "github.com/containers/image/signature" "github.com/containers/image/transports" - "github.com/containers/image/types" "github.com/pkg/errors" ) -// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity. -func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) { +// createSignature creates a new signature of manifest using keyIdentity. 
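isMultiImage above only looks at the manifest MIME type, so the same check is available to any caller of the manifest package. A small self-contained sketch, with the manifest bytes invented for illustration:

```go
package main

import (
	"fmt"

	"github.com/containers/image/manifest"
)

func main() {
	// Hypothetical raw manifest bytes; in real code they come from
	// UnparsedImage.Manifest() or ImageSource.GetManifest().
	raw := []byte(`{"schemaVersion": 2,
		"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
		"manifests": []}`)

	mt := manifest.GuessMIMEType(raw)
	fmt.Println("MIME type:", mt)
	fmt.Println("manifest list:", manifest.MIMETypeIsMultiImage(mt))
}
```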
+func (c *copier) createSignature(manifest []byte, keyIdentity string) ([]byte, error) { mech, err := signature.NewGPGSigningMechanism() if err != nil { return nil, errors.Wrap(err, "Error initializing GPG") @@ -21,12 +17,12 @@ func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity s return nil, errors.Wrap(err, "Signing not supported") } - dockerReference := dest.Reference().DockerReference() + dockerReference := c.dest.Reference().DockerReference() if dockerReference == nil { - return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference())) + return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(c.dest.Reference())) } - fmt.Fprintf(reportWriter, "Signing manifest\n") + c.Printf("Signing manifest\n") newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity) if err != nil { return nil, errors.Wrap(err, "Error creating signature") diff --git a/vendor/github.com/containers/image/directory/directory_dest.go b/vendor/github.com/containers/image/directory/directory_dest.go index ea46a27e..47d59d9f 100644 --- a/vendor/github.com/containers/image/directory/directory_dest.go +++ b/vendor/github.com/containers/image/directory/directory_dest.go @@ -4,19 +4,77 @@ import ( "io" "io/ioutil" "os" + "path/filepath" "github.com/containers/image/types" "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) +const version = "Directory Transport Version: 1.0\n" + +// ErrNotContainerImageDir indicates that the directory doesn't match the expected contents of a directory created +// using the 'dir' transport +var ErrNotContainerImageDir = errors.New("not a containers image directory, don't want to overwrite important data") + type dirImageDestination struct { - ref dirReference + ref dirReference + compress bool } -// newImageDestination returns an ImageDestination for writing to an existing directory. -func newImageDestination(ref dirReference) types.ImageDestination { - return &dirImageDestination{ref} +// newImageDestination returns an ImageDestination for writing to a directory. 
+func newImageDestination(ref dirReference, compress bool) (types.ImageDestination, error) { + d := &dirImageDestination{ref: ref, compress: compress} + + // If directory exists check if it is empty + // if not empty, check whether the contents match that of a container image directory and overwrite the contents + // if the contents don't match throw an error + dirExists, err := pathExists(d.ref.resolvedPath) + if err != nil { + return nil, errors.Wrapf(err, "error checking for path %q", d.ref.resolvedPath) + } + if dirExists { + isEmpty, err := isDirEmpty(d.ref.resolvedPath) + if err != nil { + return nil, err + } + + if !isEmpty { + versionExists, err := pathExists(d.ref.versionPath()) + if err != nil { + return nil, errors.Wrapf(err, "error checking if path exists %q", d.ref.versionPath()) + } + if versionExists { + contents, err := ioutil.ReadFile(d.ref.versionPath()) + if err != nil { + return nil, err + } + // check if contents of version file is what we expect it to be + if string(contents) != version { + return nil, ErrNotContainerImageDir + } + } else { + return nil, ErrNotContainerImageDir + } + // delete directory contents so that only one image is in the directory at a time + if err = removeDirContents(d.ref.resolvedPath); err != nil { + return nil, errors.Wrapf(err, "error erasing contents in %q", d.ref.resolvedPath) + } + logrus.Debugf("overwriting existing container image directory %q", d.ref.resolvedPath) + } + } else { + // create directory if it doesn't exist + if err := os.MkdirAll(d.ref.resolvedPath, 0755); err != nil { + return nil, errors.Wrapf(err, "unable to create directory %q", d.ref.resolvedPath) + } + } + // create version file + err = ioutil.WriteFile(d.ref.versionPath(), []byte(version), 0755) + if err != nil { + return nil, errors.Wrapf(err, "error creating version file %q", d.ref.versionPath()) + } + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -42,7 +100,7 @@ func (d *dirImageDestination) SupportsSignatures() error { // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination. 
func (d *dirImageDestination) ShouldCompressLayers() bool { - return false + return d.compress } // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually @@ -147,3 +205,39 @@ func (d *dirImageDestination) PutSignatures(signatures [][]byte) error { func (d *dirImageDestination) Commit() error { return nil } + +// returns true if path exists +func pathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if err != nil && os.IsNotExist(err) { + return false, nil + } + return false, err +} + +// returns true if directory is empty +func isDirEmpty(path string) (bool, error) { + files, err := ioutil.ReadDir(path) + if err != nil { + return false, err + } + return len(files) == 0, nil +} + +// deletes the contents of a directory +func removeDirContents(path string) error { + files, err := ioutil.ReadDir(path) + if err != nil { + return err + } + + for _, file := range files { + if err := os.RemoveAll(filepath.Join(path, file.Name())); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/containers/image/directory/directory_src.go b/vendor/github.com/containers/image/directory/directory_src.go index fddc1c52..0a8acf6b 100644 --- a/vendor/github.com/containers/image/directory/directory_src.go +++ b/vendor/github.com/containers/image/directory/directory_src.go @@ -35,7 +35,12 @@ func (s *dirImageSource) Close() error { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *dirImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dirImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) + } m, err := ioutil.ReadFile(s.ref.manifestPath()) if err != nil { return nil, "", err @@ -43,10 +48,6 @@ func (s *dirImageSource) GetManifest() ([]byte, string, error) { return m, manifest.GuessMIMEType(m), err } -func (s *dirImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return nil, "", errors.Errorf(`Getting target manifest not supported by "dir:"`) -} - // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { r, err := os.Open(s.ref.layerPath(info.Digest)) @@ -60,7 +61,14 @@ func (s *dirImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return r, fi.Size(), nil } -func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). 
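The guard added to newImageDestination above means the dir: transport now refuses to overwrite a non-empty directory unless it carries the expected version file. The sketch below restates that decision as a standalone helper so the rule is easy to see; the constant matches the version string above, while the helper name and the path in main are invented for the example.

```go
package main

import (
	"errors"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// expectedVersion mirrors the `version` constant written by the dir transport.
const expectedVersion = "Directory Transport Version: 1.0\n"

// canReuseDir is a simplified restatement of the check in newImageDestination:
// a missing or empty directory is acceptable, a non-empty one only if its
// version file matches.
func canReuseDir(dir string) error {
	entries, err := ioutil.ReadDir(dir)
	if os.IsNotExist(err) {
		return nil // directory will be created
	}
	if err != nil {
		return err
	}
	if len(entries) == 0 {
		return nil // empty directory is fine
	}
	contents, err := ioutil.ReadFile(filepath.Join(dir, "version"))
	if err != nil || string(contents) != expectedVersion {
		return errors.New("not a containers image directory, refusing to overwrite it")
	}
	return nil
}

func main() {
	// Hypothetical destination path.
	fmt.Println(canReuseDir("/tmp/busybox-copy"))
}
```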
+func (s *dirImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.Errorf(`Manifests lists are not supported by "dir:"`) + } signatures := [][]byte{} for i := 0; ; i++ { signature, err := ioutil.ReadFile(s.ref.signaturePath(i)) @@ -74,3 +82,8 @@ func (s *dirImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { } return signatures, nil } + +// LayerInfosForCopy() returns updated layer info that should be used when copying, in preference to values in the manifest, if specified. +func (s *dirImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/directory/directory_transport.go b/vendor/github.com/containers/image/directory/directory_transport.go index b9ce01a2..c3875308 100644 --- a/vendor/github.com/containers/image/directory/directory_transport.go +++ b/vendor/github.com/containers/image/directory/directory_transport.go @@ -134,13 +134,14 @@ func (ref dirReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dirReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dirReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ref) - return image.FromSource(src) + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. @@ -152,7 +153,11 @@ func (ref dirReference) NewImageSource(ctx *types.SystemContext) (types.ImageSou // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref dirReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref), nil + compress := false + if ctx != nil { + compress = ctx.DirForceCompress + } + return newImageDestination(ref, compress) } // DeleteImage deletes the named image from the registry, if supported. @@ -175,3 +180,8 @@ func (ref dirReference) layerPath(digest digest.Digest) string { func (ref dirReference) signaturePath(index int) string { return filepath.Join(ref.path, fmt.Sprintf("signature-%d", index+1)) } + +// versionPath returns a path for the version file within a directory using our conventions. 
+func (ref dirReference) versionPath() string { + return filepath.Join(ref.path, "version") +} diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go index aebcaa82..b2ffd965 100644 --- a/vendor/github.com/containers/image/docker/archive/src.go +++ b/vendor/github.com/containers/image/docker/archive/src.go @@ -34,3 +34,8 @@ func (s *archiveImageSource) Reference() types.ImageReference { func (s *archiveImageSource) Close() error { return nil } + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *archiveImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go index f38d4ace..047df73d 100644 --- a/vendor/github.com/containers/image/docker/archive/transport.go +++ b/vendor/github.com/containers/image/docker/archive/transport.go @@ -125,13 +125,14 @@ func (ref archiveReference) PolicyConfigurationNamespaces() []string { return []string{} } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src := newImageSource(ctx, ref) - return ctrImage.FromSource(src) + return ctrImage.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/docker/daemon/client.go b/vendor/github.com/containers/image/docker/daemon/client.go new file mode 100644 index 00000000..82fab4b1 --- /dev/null +++ b/vendor/github.com/containers/image/docker/daemon/client.go @@ -0,0 +1,69 @@ +package daemon + +import ( + "net/http" + "path/filepath" + + "github.com/containers/image/types" + dockerclient "github.com/docker/docker/client" + "github.com/docker/go-connections/tlsconfig" +) + +const ( + // The default API version to be used in case none is explicitly specified + defaultAPIVersion = "1.22" +) + +// NewDockerClient initializes a new API client based on the passed SystemContext. +func newDockerClient(ctx *types.SystemContext) (*dockerclient.Client, error) { + host := dockerclient.DefaultDockerHost + if ctx != nil && ctx.DockerDaemonHost != "" { + host = ctx.DockerDaemonHost + } + + // Sadly, unix:// sockets don't work transparently with dockerclient.NewClient. + // They work fine with a nil httpClient; with a non-nil httpClient, the transport’s + // TLSClientConfig must be nil (or the client will try using HTTPS over the PF_UNIX socket + // regardless of the values in the *tls.Config), and we would have to call sockets.ConfigureTransport. + // + // We don't really want to configure anything for unix:// sockets, so just pass a nil *http.Client. 
+ proto, _, _, err := dockerclient.ParseHost(host) + if err != nil { + return nil, err + } + var httpClient *http.Client + if proto != "unix" { + hc, err := tlsConfig(ctx) + if err != nil { + return nil, err + } + httpClient = hc + } + + return dockerclient.NewClient(host, defaultAPIVersion, httpClient, nil) +} + +func tlsConfig(ctx *types.SystemContext) (*http.Client, error) { + options := tlsconfig.Options{} + if ctx != nil && ctx.DockerDaemonInsecureSkipTLSVerify { + options.InsecureSkipVerify = true + } + + if ctx != nil && ctx.DockerDaemonCertPath != "" { + options.CAFile = filepath.Join(ctx.DockerDaemonCertPath, "ca.pem") + options.CertFile = filepath.Join(ctx.DockerDaemonCertPath, "cert.pem") + options.KeyFile = filepath.Join(ctx.DockerDaemonCertPath, "key.pem") + } + + tlsc, err := tlsconfig.Client(options) + if err != nil { + return nil, err + } + + return &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: tlsc, + }, + CheckRedirect: dockerclient.CheckRedirect, + }, nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go index 559e5c71..f73ac233 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_dest.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_dest.go @@ -14,6 +14,7 @@ import ( type daemonImageDestination struct { ref daemonReference + mustMatchRuntimeOS bool *tarfile.Destination // Implements most of types.ImageDestination // For talking to imageLoadGoroutine goroutineCancel context.CancelFunc @@ -24,7 +25,7 @@ type daemonImageDestination struct { } // newImageDestination returns a types.ImageDestination for the specified image reference. -func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { +func newImageDestination(ctx *types.SystemContext, ref daemonReference) (types.ImageDestination, error) { if ref.ref == nil { return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } @@ -33,7 +34,12 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t return nil, errors.Errorf("Invalid destination docker-daemon:%s: a destination must be a name:tag", ref.StringWithinTransport()) } - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + var mustMatchRuntimeOS = true + if ctx != nil && ctx.DockerDaemonHost != client.DefaultDockerHost { + mustMatchRuntimeOS = false + } + + c, err := newDockerClient(ctx) if err != nil { return nil, errors.Wrap(err, "Error initializing docker engine client") } @@ -42,16 +48,17 @@ func newImageDestination(systemCtx *types.SystemContext, ref daemonReference) (t // Commit() may never be called, so we may never read from this channel; so, make this buffered to allow imageLoadGoroutine to write status and terminate even if we never read it. 
statusChannel := make(chan error, 1) - ctx, goroutineCancel := context.WithCancel(context.Background()) - go imageLoadGoroutine(ctx, c, reader, statusChannel) + goroutineContext, goroutineCancel := context.WithCancel(context.Background()) + go imageLoadGoroutine(goroutineContext, c, reader, statusChannel) return &daemonImageDestination{ - ref: ref, - Destination: tarfile.NewDestination(writer, namedTaggedRef), - goroutineCancel: goroutineCancel, - statusChannel: statusChannel, - writer: writer, - committed: false, + ref: ref, + mustMatchRuntimeOS: mustMatchRuntimeOS, + Destination: tarfile.NewDestination(writer, namedTaggedRef), + goroutineCancel: goroutineCancel, + statusChannel: statusChannel, + writer: writer, + committed: false, }, nil } @@ -80,7 +87,7 @@ func imageLoadGoroutine(ctx context.Context, c *client.Client, reader *io.PipeRe // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. func (d *daemonImageDestination) MustMatchRuntimeOS() bool { - return true + return d.mustMatchRuntimeOS } // Close removes resources associated with an initialized ImageDestination, if any. diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_src.go b/vendor/github.com/containers/image/docker/daemon/daemon_src.go index 644dbeec..5cf7679b 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_src.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_src.go @@ -6,14 +6,12 @@ import ( "os" "github.com/containers/image/docker/tarfile" + "github.com/containers/image/internal/tmpdir" "github.com/containers/image/types" - "github.com/docker/docker/client" "github.com/pkg/errors" "golang.org/x/net/context" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - type daemonImageSource struct { ref daemonReference *tarfile.Source // Implements most of types.ImageSource @@ -35,7 +33,7 @@ type layerInfo struct { // is the config, and that the following len(RootFS) files are the layers, but that feels // way too brittle.) func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageSource, error) { - c, err := client.NewClient(client.DefaultDockerHost, "1.22", nil, nil) // FIXME: overridable host + c, err := newDockerClient(ctx) if err != nil { return nil, errors.Wrap(err, "Error initializing docker engine client") } @@ -48,7 +46,7 @@ func newImageSource(ctx *types.SystemContext, ref daemonReference) (types.ImageS defer inputStream.Close() // FIXME: use SystemContext here. - tarCopyFile, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-daemon-tar") + tarCopyFile, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-daemon-tar") if err != nil { return nil, err } @@ -83,3 +81,8 @@ func (s *daemonImageSource) Reference() types.ImageReference { func (s *daemonImageSource) Close() error { return os.Remove(s.tarCopyPath) } + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *daemonImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go index 41be1b2d..8ad6b521 100644 --- a/vendor/github.com/containers/image/docker/daemon/daemon_transport.go +++ b/vendor/github.com/containers/image/docker/daemon/daemon_transport.go @@ -151,14 +151,17 @@ func (ref daemonReference) PolicyConfigurationNamespaces() []string { return []string{} } -// NewImage returns a types.Image for this reference. -// The caller must call .Close() on the returned Image. -func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref daemonReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go index 24b82d6f..217e9dcb 100644 --- a/vendor/github.com/containers/image/docker/docker_client.go +++ b/vendor/github.com/containers/image/docker/docker_client.go @@ -8,7 +8,6 @@ import ( "io" "io/ioutil" "net/http" - "os" "path/filepath" "strings" "time" @@ -125,69 +124,6 @@ func dockerCertDir(ctx *types.SystemContext, hostPort string) string { return filepath.Join(hostCertDir, hostPort) } -func setupCertificates(dir string, tlsc *tls.Config) error { - logrus.Debugf("Looking for TLS certificates and private keys in %s", dir) - fs, err := ioutil.ReadDir(dir) - if err != nil { - if os.IsNotExist(err) { - return nil - } - if os.IsPermission(err) { - logrus.Debugf("Skipping scan of %s due to permission error: %v", dir, err) - return nil - } - return err - } - - for _, f := range fs { - fullPath := filepath.Join(dir, f.Name()) - if strings.HasSuffix(f.Name(), ".crt") { - systemPool, err := tlsconfig.SystemCertPool() - if err != nil { - return errors.Wrap(err, "unable to get system cert pool") - } - tlsc.RootCAs = systemPool - logrus.Debugf(" crt: %s", fullPath) - data, err := ioutil.ReadFile(fullPath) - if err != nil { - return err - } - tlsc.RootCAs.AppendCertsFromPEM(data) - } - if strings.HasSuffix(f.Name(), ".cert") { - certName := f.Name() - keyName := certName[:len(certName)-5] + ".key" - logrus.Debugf(" cert: %s", fullPath) - if !hasFile(fs, keyName) { - return errors.Errorf("missing key %s for client certificate %s. 
Note that CA certificates should use the extension .crt", keyName, certName) - } - cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName)) - if err != nil { - return err - } - tlsc.Certificates = append(tlsc.Certificates, cert) - } - if strings.HasSuffix(f.Name(), ".key") { - keyName := f.Name() - certName := keyName[:len(keyName)-4] + ".cert" - logrus.Debugf(" key: %s", fullPath) - if !hasFile(fs, certName) { - return errors.Errorf("missing client certificate %s for key %s", certName, keyName) - } - } - } - return nil -} - -func hasFile(files []os.FileInfo, name string) bool { - for _, f := range files { - if f.Name() == name { - return true - } - } - return false -} - // newDockerClientFromRef returns a new dockerClient instance for refHostname (a host a specified in the Docker image reference, not canonicalized to dockerRegistry) // “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection) func newDockerClientFromRef(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) { diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go index 8be35b73..2148ed8b 100644 --- a/vendor/github.com/containers/image/docker/docker_image.go +++ b/vendor/github.com/containers/image/docker/docker_image.go @@ -12,26 +12,26 @@ import ( "github.com/pkg/errors" ) -// Image is a Docker-specific implementation of types.Image with a few extra methods +// Image is a Docker-specific implementation of types.ImageCloser with a few extra methods // which are specific to Docker. type Image struct { - types.Image + types.ImageCloser src *dockerImageSource } // newImage returns a new Image interface type after setting up // a client to the registry hosting the given image. // The caller must call .Close() on the returned Image. -func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) { +func newImage(ctx *types.SystemContext, ref dockerReference) (types.ImageCloser, error) { s, err := newImageSource(ctx, ref) if err != nil { return nil, err } - img, err := image.FromSource(s) + img, err := image.FromSource(ctx, s) if err != nil { return nil, err } - return &Image{Image: img, src: s}, nil + return &Image{ImageCloser: img, src: s}, nil } // SourceRefFullName returns a fully expanded name for the repository this image is in. diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go index 32d5a18b..79c38622 100644 --- a/vendor/github.com/containers/image/docker/docker_image_dest.go +++ b/vendor/github.com/containers/image/docker/docker_image_dest.go @@ -236,7 +236,7 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return err } defer res.Body.Close() - if res.StatusCode != http.StatusCreated { + if !successStatus(res.StatusCode) { err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path) if isManifestInvalidError(errors.Cause(err)) { err = types.ManifestTypeRejectedError{Err: err} @@ -246,6 +246,12 @@ func (d *dockerImageDestination) PutManifest(m []byte) error { return nil } +// successStatus returns true if the argument is a successful HTTP response +// code (in the range 200 - 399 inclusive). 
+func successStatus(status int) bool { + return status >= 200 && status <= 399 +} + // isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error. func isManifestInvalidError(err error) bool { errors, ok := err.(errcode.Errors) diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go index 232c3cf9..63bfe8aa 100644 --- a/vendor/github.com/containers/image/docker/docker_image_src.go +++ b/vendor/github.com/containers/image/docker/docker_image_src.go @@ -52,6 +52,11 @@ func (s *dockerImageSource) Close() error { return nil } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *dockerImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + // simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1) // Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string. func simplifyContentType(contentType string) string { @@ -67,7 +72,12 @@ func simplifyContentType(contentType string) string { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *dockerImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return s.fetchManifest(context.TODO(), instanceDigest.String()) + } err := s.ensureManifestIsLoaded(context.TODO()) if err != nil { return nil, "", err @@ -94,18 +104,12 @@ func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest strin return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil } -// GetTargetManifest returns an image's manifest given a digest. -// This is mainly used to retrieve a single image's manifest out of a manifest list. -func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.fetchManifest(context.TODO(), digest.String()) -} - // ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType // // ImageSource implementations are not required or expected to do any caching, // but because our signatures are “attached” to the manifest digest, -// we need to ensure that the digest of the manifest returned by GetManifest -// and used by GetSignatures are consistent, otherwise we would get spurious +// we need to ensure that the digest of the manifest returned by GetManifest(nil) +// and used by GetSignatures(ctx, nil) are consistent, otherwise we would get spurious // signature verification failures when pulling while a tag is being updated. 
func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error { if s.cachedManifest != nil { @@ -176,22 +180,30 @@ func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, return res.Body, getBlobSize(res), nil } -func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *dockerImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { if err := s.c.detectProperties(ctx); err != nil { return nil, err } switch { case s.c.signatureBase != nil: - return s.getSignaturesFromLookaside(ctx) + return s.getSignaturesFromLookaside(ctx, instanceDigest) case s.c.supportsSignatures: - return s.getSignaturesFromAPIExtension(ctx) + return s.getSignaturesFromAPIExtension(ctx, instanceDigest) default: return [][]byte{}, nil } } -// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest. -func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) { +// manifestDigest returns a digest of the manifest, from instanceDigest if non-nil; or from the supplied reference, +// or finally, from a fetched manifest. +func (s *dockerImageSource) manifestDigest(ctx context.Context, instanceDigest *digest.Digest) (digest.Digest, error) { + if instanceDigest != nil { + return *instanceDigest, nil + } if digested, ok := s.ref.ref.(reference.Digested); ok { d := digested.Digest() if d.Algorithm() == digest.Canonical { @@ -206,8 +218,8 @@ func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, // getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase, // which is not nil. -func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) +func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } @@ -276,8 +288,8 @@ func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) ( } // getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension. 
-func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) { - manifestDigest, err := s.manifestDigest(ctx) +func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + manifestDigest, err := s.manifestDigest(ctx, instanceDigest) if err != nil { return nil, err } diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go index 1d67cc4f..cc0aa298 100644 --- a/vendor/github.com/containers/image/docker/docker_transport.go +++ b/vendor/github.com/containers/image/docker/docker_transport.go @@ -122,11 +122,12 @@ func (ref dockerReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.ref) } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { return newImage(ctx, ref) } diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go index 72c85c70..eb11ca86 100644 --- a/vendor/github.com/containers/image/docker/tarfile/dest.go +++ b/vendor/github.com/containers/image/docker/tarfile/dest.go @@ -11,6 +11,7 @@ import ( "time" "github.com/containers/image/docker/reference" + "github.com/containers/image/internal/tmpdir" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/opencontainers/go-digest" @@ -18,8 +19,6 @@ import ( "github.com/sirupsen/logrus" ) -const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. - // Destination is a partial implementation of types.ImageDestination for writing to an io.Writer. type Destination struct { writer io.Writer @@ -107,7 +106,7 @@ func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size. logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...") - streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob") + streamCopy, err := ioutil.TempFile(tmpdir.TemporaryDirectoryForBigFiles(), "docker-tarfile-blob") if err != nil { return types.BlobInfo{}, err } @@ -168,7 +167,7 @@ func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) { func (d *Destination) PutManifest(m []byte) error { // We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative, // so the caller trying a different manifest kind would be pointless. 
- var man schema2Manifest + var man manifest.Schema2 if err := json.Unmarshal(m, &man); err != nil { return errors.Wrap(err, "Error parsing manifest") } @@ -177,12 +176,12 @@ func (d *Destination) PutManifest(m []byte) error { } layerPaths := []string{} - for _, l := range man.Layers { + for _, l := range man.LayersDescriptors { layerPaths = append(layerPaths, l.Digest.String()) } items := []ManifestItem{{ - Config: man.Config.Digest.String(), + Config: man.ConfigDescriptor.Digest.String(), RepoTags: []string{d.repoTag}, Layers: layerPaths, Parent: "", diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go index f77cb713..a18e2105 100644 --- a/vendor/github.com/containers/image/docker/tarfile/src.go +++ b/vendor/github.com/containers/image/docker/tarfile/src.go @@ -24,8 +24,8 @@ type Source struct { tarManifest *ManifestItem // nil if not available yet. configBytes []byte configDigest digest.Digest - orderedDiffIDList []diffID - knownLayers map[diffID]*layerInfo + orderedDiffIDList []digest.Digest + knownLayers map[digest.Digest]*layerInfo // Other state generatedManifest []byte // Private cache for GetManifest(), nil if not set yet. } @@ -156,7 +156,7 @@ func (s *Source) ensureCachedDataIsPresent() error { if err != nil { return err } - var parsedConfig image // Most fields ommitted, we only care about layer DiffIDs. + var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs. if err := json.Unmarshal(configBytes, &parsedConfig); err != nil { return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config) } @@ -194,12 +194,12 @@ func (s *Source) LoadTarManifest() ([]ManifestItem, error) { return s.loadTarManifest() } -func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) { +func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) { // Collect layer data available in manifest and config. if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) { return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs)) } - knownLayers := map[diffID]*layerInfo{} + knownLayers := map[digest.Digest]*layerInfo{} unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes. for i, diffID := range parsedConfig.RootFS.DiffIDs { if _, ok := knownLayers[diffID]; ok { @@ -249,28 +249,34 @@ func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *Source) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *Source) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
+ return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } if s.generatedManifest == nil { if err := s.ensureCachedDataIsPresent(); err != nil { return nil, "", err } - m := schema2Manifest{ + m := manifest.Schema2{ SchemaVersion: 2, MediaType: manifest.DockerV2Schema2MediaType, - Config: distributionDescriptor{ + ConfigDescriptor: manifest.Schema2Descriptor{ MediaType: manifest.DockerV2Schema2ConfigMediaType, Size: int64(len(s.configBytes)), Digest: s.configDigest, }, - Layers: []distributionDescriptor{}, + LayersDescriptors: []manifest.Schema2Descriptor{}, } for _, diffID := range s.orderedDiffIDList { li, ok := s.knownLayers[diffID] if !ok { return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID) } - m.Layers = append(m.Layers, distributionDescriptor{ - Digest: digest.Digest(diffID), // diffID is a digest of the uncompressed tarball + m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{ + Digest: diffID, // diffID is a digest of the uncompressed tarball MediaType: manifest.DockerV2Schema2LayerMediaType, Size: li.size, }) @@ -284,13 +290,6 @@ func (s *Source) GetManifest() ([]byte, string, error) { return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil } -// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest -// out of a manifest list. -func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - // How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType. - return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) -} - type readCloseWrapper struct { io.Reader closeFunc func() error @@ -313,7 +312,7 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil } - if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball, + if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball, stream, err := s.openTarComponent(li.path) if err != nil { return nil, 0, err @@ -355,6 +354,13 @@ func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { } // GetSignatures returns the image's signatures. It may use a remote (= slow) service. -func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *Source) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + // How did we even get here? GetManifest(nil) has returned a manifest.DockerV2Schema2MediaType. 
+ return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`) + } return [][]byte{}, nil } diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go index f16cc8c6..2aa56754 100644 --- a/vendor/github.com/containers/image/docker/tarfile/types.go +++ b/vendor/github.com/containers/image/docker/tarfile/types.go @@ -1,6 +1,9 @@ package tarfile -import "github.com/opencontainers/go-digest" +import ( + "github.com/containers/image/manifest" + "github.com/opencontainers/go-digest" +) // Various data structures. @@ -18,37 +21,8 @@ type ManifestItem struct { Config string RepoTags []string Layers []string - Parent imageID `json:",omitempty"` - LayerSources map[diffID]distributionDescriptor `json:",omitempty"` + Parent imageID `json:",omitempty"` + LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"` } type imageID string -type diffID digest.Digest - -// Based on github.com/docker/distribution/blobs.go -type distributionDescriptor struct { - MediaType string `json:"mediaType,omitempty"` - Size int64 `json:"size,omitempty"` - Digest digest.Digest `json:"digest,omitempty"` - URLs []string `json:"urls,omitempty"` -} - -// Based on github.com/docker/distribution/manifest/schema2/manifest.go -// FIXME: We are repeating this all over the place; make a public copy? -type schema2Manifest struct { - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType,omitempty"` - Config distributionDescriptor `json:"config"` - Layers []distributionDescriptor `json:"layers"` -} - -// Based on github.com/docker/docker/image/image.go -// MOST CONTENT OMITTED AS UNNECESSARY -type image struct { - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []diffID `json:"diff_ids,omitempty"` -} diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go index c79adacc..412261dd 100644 --- a/vendor/github.com/containers/image/image/docker_list.go +++ b/vendor/github.com/containers/image/image/docker_list.go @@ -2,6 +2,7 @@ package image import ( "encoding/json" + "fmt" "runtime" "github.com/containers/image/manifest" @@ -21,7 +22,7 @@ type platformSpec struct { // A manifestDescriptor references a platform-specific manifest. type manifestDescriptor struct { - descriptor + manifest.Schema2Descriptor Platform platformSpec `json:"platform"` } @@ -31,22 +32,36 @@ type manifestList struct { Manifests []manifestDescriptor `json:"manifests"` } -func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) { - list := manifestList{} - if err := json.Unmarshal(manblob, &list); err != nil { - return nil, err +// chooseDigestFromManifestList parses blob as a schema2 manifest list, +// and returns the digest of the image appropriate for the current environment. 
+func chooseDigestFromManifestList(ctx *types.SystemContext, blob []byte) (digest.Digest, error) { + wantedArch := runtime.GOARCH + if ctx != nil && ctx.ArchitectureChoice != "" { + wantedArch = ctx.ArchitectureChoice + } + wantedOS := runtime.GOOS + if ctx != nil && ctx.OSChoice != "" { + wantedOS = ctx.OSChoice + } + + list := manifestList{} + if err := json.Unmarshal(blob, &list); err != nil { + return "", err } - var targetManifestDigest digest.Digest for _, d := range list.Manifests { - if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS { - targetManifestDigest = d.Digest - break + if d.Platform.Architecture == wantedArch && d.Platform.OS == wantedOS { + return d.Digest, nil } } - if targetManifestDigest == "" { - return nil, errors.New("no supported platform found in manifest list") + return "", fmt.Errorf("no image found in manifest list for architecture %s, OS %s", wantedArch, wantedOS) +} + +func manifestSchema2FromManifestList(ctx *types.SystemContext, src types.ImageSource, manblob []byte) (genericManifest, error) { + targetManifestDigest, err := chooseDigestFromManifestList(ctx, manblob) + if err != nil { + return nil, err } - manblob, mt, err := src.GetTargetManifest(targetManifestDigest) + manblob, mt, err := src.GetManifest(&targetManifestDigest) if err != nil { return nil, err } @@ -59,5 +74,20 @@ func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (gen return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest) } - return manifestInstanceFromBlob(src, manblob, mt) + return manifestInstanceFromBlob(ctx, src, manblob, mt) +} + +// ChooseManifestInstanceFromManifestList returns a digest of a manifest appropriate +// for the current system from the manifest available from src. +func ChooseManifestInstanceFromManifestList(ctx *types.SystemContext, src types.UnparsedImage) (digest.Digest, error) { + // For now this only handles manifest.DockerV2ListMediaType; we can generalize it later, + // probably along with manifest list editing. + blob, mt, err := src.Manifest() + if err != nil { + return "", err + } + if mt != manifest.DockerV2ListMediaType { + return "", fmt.Errorf("Internal error: Trying to select an image from a non-manifest-list manifest type %s", mt) + } + return chooseDigestFromManifestList(ctx, blob) } diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go index 4152b3cd..c6a6989d 100644 --- a/vendor/github.com/containers/image/image/docker_schema1.go +++ b/vendor/github.com/containers/image/image/docker_schema1.go @@ -2,9 +2,6 @@ package image import ( "encoding/json" - "regexp" - "strings" - "time" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" @@ -14,87 +11,25 @@ import ( "github.com/pkg/errors" ) -var ( - validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) -) - -type fsLayersSchema1 struct { - BlobSum digest.Digest `json:"blobSum"` -} - -type historySchema1 struct { - V1Compatibility string `json:"v1Compatibility"` -} - -// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field. 
-type v1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` -} - type manifestSchema1 struct { - Name string `json:"name"` - Tag string `json:"tag"` - Architecture string `json:"architecture"` - FSLayers []fsLayersSchema1 `json:"fsLayers"` - History []historySchema1 `json:"history"` - SchemaVersion int `json:"schemaVersion"` + m *manifest.Schema1 } -func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) { - mschema1 := &manifestSchema1{} - if err := json.Unmarshal(manifest, mschema1); err != nil { - return nil, err - } - if mschema1.SchemaVersion != 1 { - return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion) - } - if len(mschema1.FSLayers) != len(mschema1.History) { - return nil, errors.New("length of history not equal to number of layers") - } - if len(mschema1.FSLayers) == 0 { - return nil, errors.New("no FSLayers in manifest") - } - - if err := fixManifestLayers(mschema1); err != nil { - return nil, err - } - return mschema1, nil -} - -// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. -func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest { - var name, tag string - if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. - name = reference.Path(ref) - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - } - return &manifestSchema1{ - Name: name, - Tag: tag, - Architecture: architecture, - FSLayers: fsLayers, - History: history, - SchemaVersion: 1, - } -} - -func (m *manifestSchema1) serialize() ([]byte, error) { - // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. - unsigned, err := json.Marshal(*m) +func manifestSchema1FromManifest(manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema1FromManifest(manifestBlob) if err != nil { return nil, err } - return manifest.AddDummyV2S1Signature(unsigned) + return &manifestSchema1{m: m}, nil +} + +// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data. +func manifestSchema1FromComponents(ref reference.Named, fsLayers []manifest.Schema1FSLayers, history []manifest.Schema1History, architecture string) genericManifest { + return &manifestSchema1{m: manifest.Schema1FromComponents(ref, fsLayers, history, architecture)} +} + +func (m *manifestSchema1) serialize() ([]byte, error) { + return m.m.Serialize() } func (m *manifestSchema1) manifestMIMEType() string { @@ -104,7 +39,7 @@ func (m *manifestSchema1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestSchema1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. 
@@ -128,11 +63,7 @@ func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema1) LayerInfos() []types.BlobInfo { - layers := make([]types.BlobInfo, len(m.FSLayers)) - for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) - layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} - } - return layers + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -153,22 +84,11 @@ func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) } else { tag = "" } - return m.Name != name || m.Tag != tag + return m.m.Name != name || m.m.Tag != tag } func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) { - v1 := &v1Image{} - if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - Tag: m.Tag, - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(nil) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -181,25 +101,18 @@ func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m + copy := manifestSchema1{m: manifest.Schema1Clone(m.m)} if options.LayerInfos != nil { - // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. - if len(copy.FSLayers) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos)) - } - for i, info := range options.LayerInfos { - // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, - // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. - // So, we don't bother recomputing the IDs in m.History.V1Compatibility. - copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } if options.EmbeddedDockerReference != nil { - copy.Name = reference.Path(options.EmbeddedDockerReference) + copy.m.Name = reference.Path(options.EmbeddedDockerReference) if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged { - copy.Tag = tagged.Tag() + copy.m.Tag = tagged.Tag() } else { - copy.Tag = "" + copy.m.Tag = "" } } @@ -209,7 +122,21 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature; so, // handle conversions between them by doing nothing. 
case manifest.DockerV2Schema2MediaType: - return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + if err != nil { + return nil, err + } + return memoryImageFromManifest(m2), nil + case imgspecv1.MediaTypeImageManifest: + // We can't directly convert to OCI, but we can transitively convert via a Docker V2.2 Distribution manifest + m2, err := copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs) + if err != nil { + return nil, err + } + return m2.UpdatedImage(types.ManifestUpdateOptions{ + ManifestMIMEType: imgspecv1.MediaTypeImageManifest, + InformationOnly: options.InformationOnly, + }) default: return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType) } @@ -217,102 +144,32 @@ func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (typ return memoryImageFromManifest(©), nil } -// fixManifestLayers, after validating the supplied manifest -// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History), -// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates, -// both from manifest.History and manifest.FSLayers). -// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries -// (for Dockerfile operations which change the configuration but not the filesystem). -func fixManifestLayers(manifest *manifestSchema1) error { - type imageV1 struct { - ID string - Parent string - } - // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History) - imgs := make([]*imageV1, len(manifest.FSLayers)) - for i := range manifest.FSLayers { - img := &imageV1{} - - if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := validateV1ID(img.ID); err != nil { - return err - } - } - if imgs[len(imgs)-1].Parent != "" { - return errors.New("Invalid parent ID in the base layer of the image") - } - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...) - manifest.History = append(manifest.History[:i], manifest.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return errors.Errorf("Invalid parent ID. 
Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) - } - } - return nil -} - -func validateV1ID(id string) error { - if ok := validHex.MatchString(id); !ok { - return errors.Errorf("image ID %q is invalid", id) - } - return nil -} - // Based on github.com/docker/docker/distribution/pull_v2.go -func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) { - if len(m.History) == 0 { +func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (genericManifest, error) { + if len(m.m.History) == 0 { // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing. return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType) } - if len(m.History) != len(m.FSLayers) { - return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers)) + if len(m.m.History) != len(m.m.FSLayers) { + return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.m.History), len(m.m.FSLayers)) } - if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers)) + if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.m.FSLayers)) } - if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) { - return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers)) + if layerDiffIDs != nil && len(layerDiffIDs) != len(m.m.FSLayers) { + return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.m.FSLayers)) } - rootFS := rootFS{ - Type: "layers", - DiffIDs: []digest.Digest{}, - BaseLayer: "", - } - var layers []descriptor - history := make([]imageHistory, len(m.History)) - for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- { - v2Index := (len(m.History) - 1) - v1Index + // Build a list of the diffIDs for the non-empty layers. 
+ diffIDs := []digest.Digest{} + var layers []manifest.Schema2Descriptor + for v1Index := len(m.m.History) - 1; v1Index >= 0; v1Index-- { + v2Index := (len(m.m.History) - 1) - v1Index - var v1compat v1Compatibility - if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil { + var v1compat manifest.Schema1V1Compatibility + if err := json.Unmarshal([]byte(m.m.History[v1Index].V1Compatibility), &v1compat); err != nil { return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index) } - history[v2Index] = imageHistory{ - Created: v1compat.Created, - Author: v1compat.Author, - CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "), - Comment: v1compat.Comment, - EmptyLayer: v1compat.ThrowAway, - } - if !v1compat.ThrowAway { var size int64 if uploadedLayerInfos != nil { @@ -322,54 +179,23 @@ func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.Bl if layerDiffIDs != nil { d = layerDiffIDs[v2Index] } - layers = append(layers, descriptor{ + layers = append(layers, manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip", Size: size, - Digest: m.FSLayers[v1Index].BlobSum, + Digest: m.m.FSLayers[v1Index].BlobSum, }) - rootFS.DiffIDs = append(rootFS.DiffIDs, d) + diffIDs = append(diffIDs, d) } } - configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history) + configJSON, err := m.m.ToSchema2(diffIDs) if err != nil { return nil, err } - configDescriptor := descriptor{ + configDescriptor := manifest.Schema2Descriptor{ MediaType: "application/vnd.docker.container.image.v1+json", Size: int64(len(configJSON)), Digest: digest.FromBytes(configJSON), } - m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers) - return memoryImageFromManifest(m2), nil -} - -func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) { - // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields; - // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be - // a consistently reproducible value. - - // Preserve everything we don't specifically know about. - // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.) - rawContents := map[string]*json.RawMessage{} - if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?! 
- return nil, err - } - - delete(rawContents, "id") - delete(rawContents, "parent") - delete(rawContents, "Size") - delete(rawContents, "parent_id") - delete(rawContents, "layer_id") - delete(rawContents, "throwaway") - - updates := map[string]interface{}{"rootfs": rootFS, "history": history} - for field, value := range updates { - encoded, err := json.Marshal(value) - if err != nil { - return nil, err - } - rawContents[field] = (*json.RawMessage)(&encoded) - } - return json.Marshal(rawContents) + return manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers), nil } diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go index 8cc3c495..b43bc17c 100644 --- a/vendor/github.com/containers/image/image/docker_schema2.go +++ b/vendor/github.com/containers/image/image/docker_schema2.go @@ -29,54 +29,44 @@ var gzippedEmptyLayer = []byte{ // gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") -type descriptor struct { - MediaType string `json:"mediaType"` - Size int64 `json:"size"` - Digest digest.Digest `json:"digest"` - URLs []string `json:"urls,omitempty"` -} - type manifestSchema2 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - MediaType string `json:"mediaType"` - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of ConfigDescriptor. + m *manifest.Schema2 } -func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - v2s2 := manifestSchema2{src: src} - if err := json.Unmarshal(manifest, &v2s2); err != nil { +func manifestSchema2FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.Schema2FromManifest(manifestBlob) + if err != nil { return nil, err } - return &v2s2, nil + return &manifestSchema2{ + src: src, + m: m, + }, nil } // manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data: -func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest { +func manifestSchema2FromComponents(config manifest.Schema2Descriptor, src types.ImageSource, configBlob []byte, layers []manifest.Schema2Descriptor) genericManifest { return &manifestSchema2{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - MediaType: manifest.DockerV2Schema2MediaType, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.Schema2FromComponents(config, layers), } } func (m *manifestSchema2) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestSchema2) manifestMIMEType() string { - return m.MediaType + return m.m.MediaType } // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. 
func (m *manifestSchema2) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} + return m.m.ConfigInfo() } // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -105,9 +95,9 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.ConfigDescriptor.Digest, + Size: m.m.ConfigDescriptor.Size, + URLs: m.m.ConfigDescriptor.URLs, }) if err != nil { return nil, err @@ -118,8 +108,8 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.ConfigDescriptor.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.ConfigDescriptor.Digest) } m.configBlob = blob } @@ -130,15 +120,7 @@ func (m *manifestSchema2) ConfigBlob() ([]byte, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestSchema2) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{ - Digest: layer.Digest, - Size: layer.Size, - URLs: layer.URLs, - }) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -149,21 +131,18 @@ func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) } func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + return config, nil } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -176,17 +155,14 @@ func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUp // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestSchema2{ // NOTE: This is not a deep copy, it still shares slices etc. 
+ src: m.src, + configBlob: m.configBlob, + m: manifest.Schema2Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care. @@ -204,6 +180,15 @@ func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (typ return memoryImageFromManifest(©), nil } +func oci1DescriptorFromSchema2Descriptor(d manifest.Schema2Descriptor) imgspecv1.Descriptor { + return imgspecv1.Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { configOCI, err := m.OCIConfig() if err != nil { @@ -214,18 +199,16 @@ func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) { return nil, err } - config := descriptorOCI1{ - descriptor: descriptor{ - MediaType: imgspecv1.MediaTypeImageConfig, - Size: int64(len(configOCIBytes)), - Digest: digest.FromBytes(configOCIBytes), - }, + config := imgspecv1.Descriptor{ + MediaType: imgspecv1.MediaTypeImageConfig, + Size: int64(len(configOCIBytes)), + Digest: digest.FromBytes(configOCIBytes), } - layers := make([]descriptorOCI1, len(m.LayersDescriptors)) + layers := make([]imgspecv1.Descriptor, len(m.m.LayersDescriptors)) for idx := range layers { - layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]} - if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { + layers[idx] = oci1DescriptorFromSchema2Descriptor(m.m.LayersDescriptors[idx]) + if m.m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType { layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable } else { // we assume layers are gzip'ed because docker v2s2 only deals with @@ -244,14 +227,14 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) if err != nil { return nil, err } - imageConfig := &image{} + imageConfig := &manifest.Schema2Image{} if err := json.Unmarshal(configBytes, imageConfig); err != nil { return nil, err } // Build fsLayers and History, discarding all configs. We will patch the top-level config in later. 
- fsLayers := make([]fsLayersSchema1, len(imageConfig.History)) - history := make([]historySchema1, len(imageConfig.History)) + fsLayers := make([]manifest.Schema1FSLayers, len(imageConfig.History)) + history := make([]manifest.Schema1History, len(imageConfig.History)) nonemptyLayerIndex := 0 var parentV1ID string // Set in the loop v1ID := "" @@ -279,10 +262,10 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } blobDigest = gzippedEmptyLayerDigest } else { - if nonemptyLayerIndex >= len(m.LayersDescriptors) { - return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors)) + if nonemptyLayerIndex >= len(m.m.LayersDescriptors) { + return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.m.LayersDescriptors)) } - blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest + blobDigest = m.m.LayersDescriptors[nonemptyLayerIndex].Digest nonemptyLayerIndex++ } @@ -293,7 +276,7 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) } v1ID = v - fakeImage := v1Compatibility{ + fakeImage := manifest.Schema1V1Compatibility{ ID: v1ID, Parent: parentV1ID, Comment: historyEntry.Comment, @@ -307,8 +290,8 @@ func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage) } - fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest} - history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)} + fsLayers[v1Index] = manifest.Schema1FSLayers{BlobSum: blobDigest} + history[v1Index] = manifest.Schema1History{V1Compatibility: string(v1CompatibilityBytes)} // Note that parentV1ID of the top layer is preserved when exiting this loop } diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go index 75c9e711..cdd4233f 100644 --- a/vendor/github.com/containers/image/image/manifest.go +++ b/vendor/github.com/containers/image/image/manifest.go @@ -1,57 +1,14 @@ package image import ( - "time" + "fmt" "github.com/containers/image/docker/reference" "github.com/containers/image/manifest" - "github.com/containers/image/pkg/strslice" "github.com/containers/image/types" - "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" ) -type config struct { - Cmd strslice.StrSlice - Labels map[string]string -} - -type v1Image struct { - ID string `json:"id,omitempty"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig *config `json:"container_config,omitempty"` - DockerVersion string `json:"docker_version,omitempty"` - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` -} - -type image struct { - v1Image - History []imageHistory `json:"history,omitempty"` - RootFS *rootFS `json:"rootfs,omitempty"` -} - -type imageHistory struct { - Created time.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer 
bool `json:"empty_layer,omitempty"` -} - -type rootFS struct { - Type string `json:"type"` - DiffIDs []digest.Digest `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - // genericManifest is an interface for parsing, modifying image manifests and related data. // Note that the public methods are intended to be a subset of types.Image // so that embedding a genericManifest into structs works. @@ -87,43 +44,24 @@ type genericManifest interface { UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) } -func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { - switch mt { - // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . - // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might - // need to happen within the ImageSource. - case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json": +// manifestInstanceFromBlob returns a genericManifest implementation for (manblob, mt) in src. +// If manblob is a manifest list, it implicitly chooses an appropriate image from the list. +func manifestInstanceFromBlob(ctx *types.SystemContext, src types.ImageSource, manblob []byte, mt string) (genericManifest, error) { + switch manifest.NormalizedMIMEType(mt) { + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType: return manifestSchema1FromManifest(manblob) case imgspecv1.MediaTypeImageManifest: return manifestOCI1FromManifest(src, manblob) case manifest.DockerV2Schema2MediaType: return manifestSchema2FromManifest(src, manblob) case manifest.DockerV2ListMediaType: - return manifestSchema2FromManifestList(src, manblob) - default: - // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time - // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 - // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 - // - // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. - // This makes no real sense, but it happens - // because requests for manifests are - // redirected to a content distribution - // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 - return manifestSchema1FromManifest(manblob) + return manifestSchema2FromManifestList(ctx, src, manblob) + default: // Note that this may not be reachable, manifest.NormalizedMIMEType has a default for unknown values. 
+ return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) } } // inspectManifest is an implementation of types.Image.Inspect func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) { - info, err := m.imageInspectInfo() - if err != nil { - return nil, err - } - layers := m.LayerInfos() - info.Layers = make([]string, len(layers)) - for i, layer := range layers { - info.Layers[i] = layer.Digest.String() - } - return info, nil + return m.imageInspectInfo() } diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go index 62995f61..4639c49a 100644 --- a/vendor/github.com/containers/image/image/memory.go +++ b/vendor/github.com/containers/image/image/memory.go @@ -33,11 +33,6 @@ func (i *memoryImage) Reference() types.ImageReference { return nil } -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *memoryImage) Close() error { - return nil -} - // Size returns the size of the image as stored, if known, or -1 if not. func (i *memoryImage) Size() (int64, error) { return -1, nil @@ -67,7 +62,9 @@ func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) { return inspectManifest(i.genericManifest) } -// IsMultiImage returns true if the image's manifest is a list of images, false otherwise. -func (i *memoryImage) IsMultiImage() bool { - return false +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *memoryImage) LayerInfosForCopy() []types.BlobInfo { + return nil } diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go index 5f7c0728..3c03e49b 100644 --- a/vendor/github.com/containers/image/image/oci.go +++ b/vendor/github.com/containers/image/image/oci.go @@ -12,41 +12,34 @@ import ( "github.com/pkg/errors" ) -type descriptorOCI1 struct { - descriptor - Annotations map[string]string `json:"annotations,omitempty"` -} - type manifestOCI1 struct { - src types.ImageSource // May be nil if configBlob is not nil - configBlob []byte // If set, corresponds to contents of ConfigDescriptor. - SchemaVersion int `json:"schemaVersion"` - ConfigDescriptor descriptorOCI1 `json:"config"` - LayersDescriptors []descriptorOCI1 `json:"layers"` - Annotations map[string]string `json:"annotations,omitempty"` + src types.ImageSource // May be nil if configBlob is not nil + configBlob []byte // If set, corresponds to contents of m.Config. 
+ m *manifest.OCI1 } -func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) { - oci := manifestOCI1{src: src} - if err := json.Unmarshal(manifest, &oci); err != nil { +func manifestOCI1FromManifest(src types.ImageSource, manifestBlob []byte) (genericManifest, error) { + m, err := manifest.OCI1FromManifest(manifestBlob) + if err != nil { return nil, err } - return &oci, nil + return &manifestOCI1{ + src: src, + m: m, + }, nil } // manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data: -func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest { +func manifestOCI1FromComponents(config imgspecv1.Descriptor, src types.ImageSource, configBlob []byte, layers []imgspecv1.Descriptor) genericManifest { return &manifestOCI1{ - src: src, - configBlob: configBlob, - SchemaVersion: 2, - ConfigDescriptor: config, - LayersDescriptors: layers, + src: src, + configBlob: configBlob, + m: manifest.OCI1FromComponents(config, layers), } } func (m *manifestOCI1) serialize() ([]byte, error) { - return json.Marshal(*m) + return m.m.Serialize() } func (m *manifestOCI1) manifestMIMEType() string { @@ -56,7 +49,7 @@ func (m *manifestOCI1) manifestMIMEType() string { // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. func (m *manifestOCI1) ConfigInfo() types.BlobInfo { - return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size, Annotations: m.ConfigDescriptor.Annotations} + return m.m.ConfigInfo() } // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. @@ -67,9 +60,9 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1") } stream, _, err := m.src.GetBlob(types.BlobInfo{ - Digest: m.ConfigDescriptor.Digest, - Size: m.ConfigDescriptor.Size, - URLs: m.ConfigDescriptor.URLs, + Digest: m.m.Config.Digest, + Size: m.m.Config.Size, + URLs: m.m.Config.URLs, }) if err != nil { return nil, err @@ -80,8 +73,8 @@ func (m *manifestOCI1) ConfigBlob() ([]byte, error) { return nil, err } computedDigest := digest.FromBytes(blob) - if computedDigest != m.ConfigDescriptor.Digest { - return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest) + if computedDigest != m.m.Config.Digest { + return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.m.Config.Digest) } m.configBlob = blob } @@ -107,11 +100,7 @@ func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) { // The Digest field is guaranteed to be provided; Size may be -1. // WARNING: The list may contain duplicates, and they are semantically relevant. func (m *manifestOCI1) LayerInfos() []types.BlobInfo { - blobs := []types.BlobInfo{} - for _, layer := range m.LayersDescriptors { - blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs}) - } - return blobs + return m.m.LayerInfos() } // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. 
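// [Illustrative sketch, not part of the patch] Both ConfigBlob implementations above follow the same pattern: fetch the config blob from the ImageSource, then refuse it unless its computed digest matches the digest recorded in the manifest descriptor. Assuming only a raw blob and its expected digest (imports: github.com/opencontainers/go-digest, github.com/pkg/errors), the check boils down to:
//
//	func verifyConfigBlob(blob []byte, expected digest.Digest) error {
//		computed := digest.FromBytes(blob)
//		if computed != expected {
//			return errors.Errorf("Download config.json digest %s does not match expected %s", computed, expected)
//		}
//		return nil
//	}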
@@ -122,21 +111,18 @@ func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) boo } func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) { - config, err := m.ConfigBlob() - if err != nil { - return nil, err + getter := func(info types.BlobInfo) ([]byte, error) { + if info.Digest != m.ConfigInfo().Digest { + // Shouldn't ever happen + return nil, errors.New("asked for a different config blob") + } + config, err := m.ConfigBlob() + if err != nil { + return nil, err + } + return config, nil } - v1 := &v1Image{} - if err := json.Unmarshal(config, v1); err != nil { - return nil, err - } - return &types.ImageInspectInfo{ - DockerVersion: v1.DockerVersion, - Created: v1.Created, - Labels: v1.Config.Labels, - Architecture: v1.Architecture, - Os: v1.OS, - }, nil + return m.m.Inspect(getter) } // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs. @@ -149,18 +135,14 @@ func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdat // UpdatedImage returns a types.Image modified according to options. // This does not change the state of the original Image object. func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) { - copy := *m // NOTE: This is not a deep copy, it still shares slices etc. + copy := manifestOCI1{ // NOTE: This is not a deep copy, it still shares slices etc. + src: m.src, + configBlob: m.configBlob, + m: manifest.OCI1Clone(m.m), + } if options.LayerInfos != nil { - if len(copy.LayersDescriptors) != len(options.LayerInfos) { - return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos)) - } - copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos)) - for i, info := range options.LayerInfos { - copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType - copy.LayersDescriptors[i].Digest = info.Digest - copy.LayersDescriptors[i].Size = info.Size - copy.LayersDescriptors[i].Annotations = info.Annotations - copy.LayersDescriptors[i].URLs = info.URLs + if err := copy.m.UpdateLayerInfos(options.LayerInfos); err != nil { + return nil, err } } // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care. @@ -176,17 +158,26 @@ func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types. return memoryImageFromManifest(©), nil } +func schema2DescriptorFromOCI1Descriptor(d imgspecv1.Descriptor) manifest.Schema2Descriptor { + return manifest.Schema2Descriptor{ + MediaType: d.MediaType, + Size: d.Size, + Digest: d.Digest, + URLs: d.URLs, + } +} + func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) { // Create a copy of the descriptor. - config := m.ConfigDescriptor.descriptor + config := schema2DescriptorFromOCI1Descriptor(m.m.Config) // The only difference between OCI and DockerSchema2 is the mediatypes. The // media type of the manifest is handled by manifestSchema2FromComponents. 
config.MediaType = manifest.DockerV2Schema2ConfigMediaType - layers := make([]descriptor, len(m.LayersDescriptors)) + layers := make([]manifest.Schema2Descriptor, len(m.m.Layers)) for idx := range layers { - layers[idx] = m.LayersDescriptors[idx].descriptor + layers[idx] = schema2DescriptorFromOCI1Descriptor(m.m.Layers[idx]) layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType } diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go index ef35b3c3..3477f341 100644 --- a/vendor/github.com/containers/image/image/sourced.go +++ b/vendor/github.com/containers/image/image/sourced.go @@ -4,12 +4,22 @@ package image import ( - "github.com/containers/image/manifest" "github.com/containers/image/types" ) -// FromSource returns a types.Image implementation for source. -// The caller must call .Close() on the returned Image. +// imageCloser implements types.ImageCloser, perhaps allowing simple users +// to use a single object without having keep a reference to a types.ImageSource +// only to call types.ImageSource.Close(). +type imageCloser struct { + types.Image + src types.ImageSource +} + +// FromSource returns a types.ImageCloser implementation for the default instance of source. +// If source is a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate image instance. +// +// The caller must call .Close() on the returned ImageCloser. // // FromSource “takes ownership” of the input ImageSource and will call src.Close() // when the image is closed. (This does not prevent callers from using both the @@ -18,8 +28,19 @@ import ( // // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function. -func FromSource(src types.ImageSource) (types.Image, error) { - return FromUnparsedImage(UnparsedFromSource(src)) +func FromSource(ctx *types.SystemContext, src types.ImageSource) (types.ImageCloser, error) { + img, err := FromUnparsedImage(ctx, UnparsedInstance(src, nil)) + if err != nil { + return nil, err + } + return &imageCloser{ + Image: img, + src: src, + }, nil +} + +func (ic *imageCloser) Close() error { + return ic.src.Close() } // sourcedImage is a general set of utilities for working with container images, @@ -38,27 +59,22 @@ type sourcedImage struct { } // FromUnparsedImage returns a types.Image implementation for unparsed. -// The caller must call .Close() on the returned Image. +// If unparsed represents a manifest list, .Manifest() still returns the manifest list, +// but other methods transparently return data from an appropriate single image. // -// FromSource “takes ownership” of the input UnparsedImage and will call uparsed.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the Image.) -func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) { +// The Image must not be used after the underlying ImageSource is Close()d. +func FromUnparsedImage(ctx *types.SystemContext, unparsed *UnparsedImage) (types.Image, error) { // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage: // we want to be able to use unparsed.src. 
We could make that an explicit interface, but, well, // this is the only UnparsedImage implementation around, anyway. - // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to - // unparsed.Close. - // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest(). manifestBlob, manifestMIMEType, err := unparsed.Manifest() if err != nil { return nil, err } - parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType) + parsedManifest, err := manifestInstanceFromBlob(ctx, unparsed.src, manifestBlob, manifestMIMEType) if err != nil { return nil, err } @@ -85,6 +101,6 @@ func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) { return inspectManifest(i.genericManifest) } -func (i *sourcedImage) IsMultiImage() bool { - return i.manifestMIMEType == manifest.DockerV2ListMediaType +func (i *sourcedImage) LayerInfosForCopy() []types.BlobInfo { + return i.UnparsedImage.LayerInfosForCopy() } diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go index 483cfd04..aff06d8a 100644 --- a/vendor/github.com/containers/image/image/unparsed.go +++ b/vendor/github.com/containers/image/image/unparsed.go @@ -11,8 +11,10 @@ import ( ) // UnparsedImage implements types.UnparsedImage . +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. type UnparsedImage struct { src types.ImageSource + instanceDigest *digest.Digest cachedManifest []byte // A private cache for Manifest(); nil if not yet known. // A private cache for Manifest(), may be the empty string if guessing failed. // Valid iff cachedManifest is not nil. @@ -20,49 +22,41 @@ type UnparsedImage struct { cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known. } -// UnparsedFromSource returns a types.UnparsedImage implementation for source. -// The caller must call .Close() on the returned UnparsedImage. +// UnparsedInstance returns a types.UnparsedImage implementation for (source, instanceDigest). +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list). // -// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close() -// when the image is closed. (This does not prevent callers from using both the -// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to -// keep a reference to the UnparsedImage.) -func UnparsedFromSource(src types.ImageSource) *UnparsedImage { - return &UnparsedImage{src: src} +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. +func UnparsedInstance(src types.ImageSource, instanceDigest *digest.Digest) *UnparsedImage { + return &UnparsedImage{ + src: src, + instanceDigest: instanceDigest, + } } // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. func (i *UnparsedImage) Reference() types.ImageReference { + // Note that this does not depend on instanceDigest; e.g. all instances within a manifest list need to be signed with the manifest list identity. 
return i.src.Reference() } -// Close removes resources associated with an initialized UnparsedImage, if any. -func (i *UnparsedImage) Close() error { - return i.src.Close() -} - // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Manifest() ([]byte, string, error) { if i.cachedManifest == nil { - m, mt, err := i.src.GetManifest() + m, mt, err := i.src.GetManifest(i.instanceDigest) if err != nil { return nil, "", err } // ImageSource.GetManifest does not do digest verification, but we do; // this immediately protects also any user of types.Image. - ref := i.Reference().DockerReference() - if ref != nil { - if canonical, ok := ref.(reference.Canonical); ok { - digest := digest.Digest(canonical.Digest()) - matches, err := manifest.MatchesDigest(m, digest) - if err != nil { - return nil, "", errors.Wrap(err, "Error computing manifest digest") - } - if !matches { - return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) - } + if digest, haveDigest := i.expectedManifestDigest(); haveDigest { + matches, err := manifest.MatchesDigest(m, digest) + if err != nil { + return nil, "", errors.Wrap(err, "Error computing manifest digest") + } + if !matches { + return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest) } } @@ -72,10 +66,26 @@ func (i *UnparsedImage) Manifest() ([]byte, string, error) { return i.cachedManifest, i.cachedManifestMIMEType, nil } +// expectedManifestDigest returns a the expected value of the manifest digest, and an indicator whether it is known. +// The bool return value seems redundant with digest != ""; it is used explicitly +// to refuse (unexpected) situations when the digest exists but is "". +func (i *UnparsedImage) expectedManifestDigest() (digest.Digest, bool) { + if i.instanceDigest != nil { + return *i.instanceDigest, true + } + ref := i.Reference().DockerReference() + if ref != nil { + if canonical, ok := ref.(reference.Canonical); ok { + return canonical.Digest(), true + } + } + return "", false +} + // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { if i.cachedSignatures == nil { - sigs, err := i.src.GetSignatures(ctx) + sigs, err := i.src.GetSignatures(ctx, i.instanceDigest) if err != nil { return nil, err } @@ -83,3 +93,10 @@ func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) { } return i.cachedSignatures, nil } + +// LayerInfosForCopy returns an updated set of layer blob information which may not match the manifest. +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (i *UnparsedImage) LayerInfosForCopy() []types.BlobInfo { + return i.src.LayerInfosForCopy() +} diff --git a/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go new file mode 100644 index 00000000..a28020ed --- /dev/null +++ b/vendor/github.com/containers/image/internal/tmpdir/tmpdir.go @@ -0,0 +1,19 @@ +package tmpdir + +import ( + "os" + "runtime" +) + +// TemporaryDirectoryForBigFiles returns a directory for temporary (big) files. 
+// On non Windows systems it avoids the use of os.TempDir(), because the default temporary directory usually falls under /tmp +// which on systemd based systems could be the unsuitable tmpfs filesystem. +func TemporaryDirectoryForBigFiles() string { + var temporaryDirectoryForBigFiles string + if runtime.GOOS == "windows" { + temporaryDirectoryForBigFiles = os.TempDir() + } else { + temporaryDirectoryForBigFiles = "/var/tmp" + } + return temporaryDirectoryForBigFiles +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema1.go b/vendor/github.com/containers/image/manifest/docker_schema1.go new file mode 100644 index 00000000..b1c1cfe9 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema1.go @@ -0,0 +1,310 @@ +package manifest + +import ( + "encoding/json" + "regexp" + "strings" + "time" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/types" + "github.com/docker/docker/api/types/versions" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema1FSLayers is an entry of the "fsLayers" array in docker/distribution schema 1. +type Schema1FSLayers struct { + BlobSum digest.Digest `json:"blobSum"` +} + +// Schema1History is an entry of the "history" array in docker/distribution schema 1. +type Schema1History struct { + V1Compatibility string `json:"v1Compatibility"` +} + +// Schema1 is a manifest in docker/distribution schema 1. +type Schema1 struct { + Name string `json:"name"` + Tag string `json:"tag"` + Architecture string `json:"architecture"` + FSLayers []Schema1FSLayers `json:"fsLayers"` + History []Schema1History `json:"history"` + SchemaVersion int `json:"schemaVersion"` +} + +// Schema1V1Compatibility is a v1Compatibility in docker/distribution schema 1. +type Schema1V1Compatibility struct { + ID string `json:"id"` + Parent string `json:"parent,omitempty"` + Comment string `json:"comment,omitempty"` + Created time.Time `json:"created"` + ContainerConfig struct { + Cmd []string + } `json:"container_config,omitempty"` + Author string `json:"author,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` +} + +// Schema1FromManifest creates a Schema1 manifest instance from a manifest blob. +// (NOTE: The instance is not necessary a literal representation of the original blob, +// layers with duplicate IDs are eliminated.) +func Schema1FromManifest(manifest []byte) (*Schema1, error) { + s1 := Schema1{} + if err := json.Unmarshal(manifest, &s1); err != nil { + return nil, err + } + if s1.SchemaVersion != 1 { + return nil, errors.Errorf("unsupported schema version %d", s1.SchemaVersion) + } + if len(s1.FSLayers) != len(s1.History) { + return nil, errors.New("length of history not equal to number of layers") + } + if len(s1.FSLayers) == 0 { + return nil, errors.New("no FSLayers in manifest") + } + if err := s1.fixManifestLayers(); err != nil { + return nil, err + } + return &s1, nil +} + +// Schema1FromComponents creates an Schema1 manifest instance from the supplied data. +func Schema1FromComponents(ref reference.Named, fsLayers []Schema1FSLayers, history []Schema1History, architecture string) *Schema1 { + var name, tag string + if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them. 
+ name = reference.Path(ref) + if tagged, ok := ref.(reference.NamedTagged); ok { + tag = tagged.Tag() + } + } + return &Schema1{ + Name: name, + Tag: tag, + Architecture: architecture, + FSLayers: fsLayers, + History: history, + SchemaVersion: 1, + } +} + +// Schema1Clone creates a copy of the supplied Schema1 manifest. +func Schema1Clone(src *Schema1) *Schema1 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema1) LayerInfos() []types.BlobInfo { + layers := make([]types.BlobInfo, len(m.FSLayers)) + for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway) + layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1} + } + return layers +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well. + if len(m.FSLayers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.FSLayers), len(layerInfos)) + } + for i, info := range layerInfos { + // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest, + // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness. + // So, we don't bother recomputing the IDs in m.History.V1Compatibility. + m.FSLayers[(len(layerInfos)-1)-i].BlobSum = info.Digest + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema1) Serialize() ([]byte, error) { + // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType. + unsigned, err := json.Marshal(*m) + if err != nil { + return nil, err + } + return AddDummyV2S1Signature(unsigned) +} + +// fixManifestLayers, after validating the supplied manifest +// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in m.History), +// modifies manifest to only have one entry for each layer ID in m.History (deleting the older duplicates, +// both from m.History and m.FSLayers). +// Note that even after this succeeds, m.FSLayers may contain duplicate entries +// (for Dockerfile operations which change the configuration but not the filesystem).
+func (m *Schema1) fixManifestLayers() error { + type imageV1 struct { + ID string + Parent string + } + // Per the specification, we can assume that len(m.FSLayers) == len(m.History) + imgs := make([]*imageV1, len(m.FSLayers)) + for i := range m.FSLayers { + img := &imageV1{} + + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { + return err + } + + imgs[i] = img + if err := validateV1ID(img.ID); err != nil { + return err + } + } + if imgs[len(imgs)-1].Parent != "" { + return errors.New("Invalid parent ID in the base layer of the image") + } + // check general duplicates to error instead of a deadlock + idmap := make(map[string]struct{}) + var lastID string + for _, img := range imgs { + // skip IDs that appear after each other, we handle those later + if _, exists := idmap[img.ID]; img.ID != lastID && exists { + return errors.Errorf("ID %+v appears multiple times in manifest", img.ID) + } + lastID = img.ID + idmap[lastID] = struct{}{} + } + // backwards loop so that we keep the remaining indexes after removing items + for i := len(imgs) - 2; i >= 0; i-- { + if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue + m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) + m.History = append(m.History[:i], m.History[i+1:]...) + } else if imgs[i].Parent != imgs[i+1].ID { + return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent) + } + } + return nil +} + +var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) + +func validateV1ID(id string) error { + if ok := validHex.MatchString(id); !ok { + return errors.Errorf("image ID %q is invalid", id) + } + return nil +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema1) Inspect(_ func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + s1 := &Schema2V1Image{} + if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), s1); err != nil { + return nil, err + } + return &types.ImageInspectInfo{ + Tag: m.Tag, + Created: s1.Created, + DockerVersion: s1.DockerVersion, + Labels: make(map[string]string), + Architecture: s1.Architecture, + Os: s1.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + }, nil +} + +// ToSchema2 builds a schema2-style configuration blob using the supplied diffIDs. +func (m *Schema1) ToSchema2(diffIDs []digest.Digest) ([]byte, error) { + // Convert the schema 1 compat info into a schema 2 config, constructing some of the fields + // that aren't directly comparable using info from the manifest. + if len(m.History) == 0 { + return nil, errors.New("image has no layers") + } + s2 := struct { + Schema2Image + ID string `json:"id,omitempty"` + Parent string `json:"parent,omitempty"` + ParentID string `json:"parent_id,omitempty"` + LayerID string `json:"layer_id,omitempty"` + ThrowAway bool `json:"throwaway,omitempty"` + Size int64 `json:",omitempty"` + }{} + config := []byte(m.History[0].V1Compatibility) + err := json.Unmarshal(config, &s2) + if err != nil { + return nil, errors.Wrapf(err, "error decoding configuration") + } + // Images created with versions prior to 1.8.3 require us to re-encode the encoded object, + // adding some fields that aren't "omitempty". + if s2.DockerVersion != "" && versions.LessThan(s2.DockerVersion, "1.8.3") { + config, err = json.Marshal(&s2) + if err != nil { + return nil, errors.Wrapf(err, "error re-encoding compat image config %#v", s2) + } + } + // Build the history. 
+ convertedHistory := []Schema2History{} + for _, h := range m.History { + compat := Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(h.V1Compatibility), &compat); err != nil { + return nil, errors.Wrapf(err, "error decoding history information") + } + hitem := Schema2History{ + Created: compat.Created, + CreatedBy: strings.Join(compat.ContainerConfig.Cmd, " "), + Author: compat.Author, + Comment: compat.Comment, + EmptyLayer: compat.ThrowAway, + } + convertedHistory = append([]Schema2History{hitem}, convertedHistory...) + } + // Build the rootfs information. We need the decompressed sums that we've been + // calculating to fill in the DiffIDs. It's expected (but not enforced by us) + // that the number of diffIDs corresponds to the number of non-EmptyLayer + // entries in the history. + rootFS := &Schema2RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + // And now for some raw manipulation. + raw := make(map[string]*json.RawMessage) + err = json.Unmarshal(config, &raw) + if err != nil { + return nil, errors.Wrapf(err, "error re-decoding compat image config %#v: %v", s2) + } + // Drop some fields. + delete(raw, "id") + delete(raw, "parent") + delete(raw, "parent_id") + delete(raw, "layer_id") + delete(raw, "throwaway") + delete(raw, "Size") + // Add the history and rootfs information. + rootfs, err := json.Marshal(rootFS) + if err != nil { + return nil, errors.Errorf("error encoding rootfs information %#v: %v", rootFS, err) + } + rawRootfs := json.RawMessage(rootfs) + raw["rootfs"] = &rawRootfs + history, err := json.Marshal(convertedHistory) + if err != nil { + return nil, errors.Errorf("error encoding history information %#v: %v", convertedHistory, err) + } + rawHistory := json.RawMessage(history) + raw["history"] = &rawHistory + // Encode the result. + config, err = json.Marshal(raw) + if err != nil { + return nil, errors.Errorf("error re-encoding compat image config %#v: %v", s2, err) + } + return config, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *Schema1) ImageID(diffIDs []digest.Digest) (string, error) { + image, err := m.ToSchema2(diffIDs) + if err != nil { + return "", err + } + return digest.FromBytes(image).Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/docker_schema2.go b/vendor/github.com/containers/image/manifest/docker_schema2.go new file mode 100644 index 00000000..ef82ffc2 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/docker_schema2.go @@ -0,0 +1,251 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/pkg/strslice" + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Schema2Descriptor is a “descriptor” in docker/distribution schema 2. +type Schema2Descriptor struct { + MediaType string `json:"mediaType"` + Size int64 `json:"size"` + Digest digest.Digest `json:"digest"` + URLs []string `json:"urls,omitempty"` +} + +// Schema2 is a manifest in docker/distribution schema 2. +type Schema2 struct { + SchemaVersion int `json:"schemaVersion"` + MediaType string `json:"mediaType"` + ConfigDescriptor Schema2Descriptor `json:"config"` + LayersDescriptors []Schema2Descriptor `json:"layers"` +} + +// Schema2Port is a Port, a string containing port number and protocol in the +// format "80/tcp", from docker/go-connections/nat. +type Schema2Port string + +// Schema2PortSet is a PortSet, a collection of structs indexed by Port, from +// docker/go-connections/nat. 
+type Schema2PortSet map[Schema2Port]struct{} + +// Schema2HealthConfig is a HealthConfig, which holds configuration settings +// for the HEALTHCHECK feature, from docker/docker/api/types/container. +type Schema2HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Schema2Config is a Config in docker/docker/api/types/container. +type Schema2Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also support user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts Schema2PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the 1 attached client disconnects. + Env []string // List of environment variable to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *Schema2HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) + Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile + Labels map[string]string // List of labels set to this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} + +// Schema2V1Image is a V1Image in docker/docker/image. 
+type Schema2V1Image struct { + // ID is a unique 64 character identifier of the image + ID string `json:"id,omitempty"` + // Parent is the ID of the parent image + Parent string `json:"parent,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Container is the id of the container used to commit + Container string `json:"container,omitempty"` + // ContainerConfig is the configuration of the container that is committed into the image + ContainerConfig Schema2Config `json:"container_config,omitempty"` + // DockerVersion specifies the version of Docker that was used to build the image + DockerVersion string `json:"docker_version,omitempty"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // Config is the configuration of the container received from the client + Config *Schema2Config `json:"config,omitempty"` + // Architecture is the hardware that the image is build and runs on + Architecture string `json:"architecture,omitempty"` + // OS is the operating system used to build and run the image + OS string `json:"os,omitempty"` + // Size is the total size of the image including all layers it is composed of + Size int64 `json:",omitempty"` +} + +// Schema2RootFS is a description of how to build up an image's root filesystem, from docker/docker/image. +type Schema2RootFS struct { + Type string `json:"type"` + DiffIDs []digest.Digest `json:"diff_ids,omitempty"` +} + +// Schema2History stores build commands that were used to create an image, from docker/docker/image. +type Schema2History struct { + // Created is the timestamp at which the image was created + Created time.Time `json:"created"` + // Author is the name of the author that was specified when committing the image + Author string `json:"author,omitempty"` + // CreatedBy keeps the Dockerfile command used while building the image + CreatedBy string `json:"created_by,omitempty"` + // Comment is the commit message that was set when committing the image + Comment string `json:"comment,omitempty"` + // EmptyLayer is set to true if this history item did not generate a + // layer. Otherwise, the history item is associated with the next + // layer in the RootFS section. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Schema2Image is an Image in docker/docker/image. +type Schema2Image struct { + Schema2V1Image + Parent digest.Digest `json:"parent,omitempty"` + RootFS *Schema2RootFS `json:"rootfs,omitempty"` + History []Schema2History `json:"history,omitempty"` + OSVersion string `json:"os.version,omitempty"` + OSFeatures []string `json:"os.features,omitempty"` + + // rawJSON caches the immutable JSON associated with this image. + rawJSON []byte + + // computedID is the ID computed from the hash of the image config. + // Not to be confused with the legacy V1 ID in V1Image. + computedID digest.Digest +} + +// Schema2FromManifest creates a Schema2 manifest instance from a manifest blob. +func Schema2FromManifest(manifest []byte) (*Schema2, error) { + s2 := Schema2{} + if err := json.Unmarshal(manifest, &s2); err != nil { + return nil, err + } + return &s2, nil +} + +// Schema2FromComponents creates an Schema2 manifest instance from the supplied data. 
+func Schema2FromComponents(config Schema2Descriptor, layers []Schema2Descriptor) *Schema2 { + return &Schema2{ + SchemaVersion: 2, + MediaType: DockerV2Schema2MediaType, + ConfigDescriptor: config, + LayersDescriptors: layers, + } +} + +// Schema2Clone creates a copy of the supplied Schema2 manifest. +func Schema2Clone(src *Schema2) *Schema2 { + copy := *src + return &copy +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *Schema2) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *Schema2) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.LayersDescriptors { + blobs = append(blobs, types.BlobInfo{ + Digest: layer.Digest, + Size: layer.Size, + URLs: layer.URLs, + }) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *Schema2) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.LayersDescriptors) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.LayersDescriptors), len(layerInfos)) + } + original := m.LayersDescriptors + m.LayersDescriptors = make([]Schema2Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.LayersDescriptors[i].MediaType = original[i].MediaType + m.LayersDescriptors[i].Digest = info.Digest + m.LayersDescriptors[i].Size = info.Size + m.LayersDescriptors[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *Schema2) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. +func (m *Schema2) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + s2 := &Schema2Image{} + if err := json.Unmarshal(config, s2); err != nil { + return nil, err + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: s2.Created, + DockerVersion: s2.DockerVersion, + Architecture: s2.Architecture, + Os: s2.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + } + if s2.Config != nil { + i.Labels = s2.Config.Labels + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents.
+func (m *Schema2) ImageID([]digest.Digest) (string, error) { + if err := m.ConfigDescriptor.Digest.Validate(); err != nil { + return "", err + } + return m.ConfigDescriptor.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go index e329ee57..2bc801d8 100644 --- a/vendor/github.com/containers/image/manifest/manifest.go +++ b/vendor/github.com/containers/image/manifest/manifest.go @@ -2,7 +2,9 @@ package manifest import ( "encoding/json" + "fmt" + "github.com/containers/image/types" "github.com/docker/libtrust" "github.com/opencontainers/go-digest" imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" @@ -35,7 +37,40 @@ var DefaultRequestedManifestMIMETypes = []string{ DockerV2Schema2MediaType, DockerV2Schema1SignedMediaType, DockerV2Schema1MediaType, - // DockerV2ListMediaType, // FIXME: Restore this ASAP + DockerV2ListMediaType, +} + +// Manifest is an interface for parsing, modifying image manifests in isolation. +// Callers can either use this abstract interface without understanding the details of the formats, +// or instantiate a specific implementation (e.g. manifest.OCI1) and access the public members +// directly. +// +// See types.Image for functionality not limited to manifests, including format conversions and config parsing. +// This interface is similar to, but not strictly equivalent to, the equivalent methods in types.Image. +type Manifest interface { + // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. + ConfigInfo() types.BlobInfo + // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfos() []types.BlobInfo + // UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) + UpdateLayerInfos(layerInfos []types.BlobInfo) error + + // ImageID computes an ID which can uniquely identify this image by its contents, irrespective + // of which (of possibly more than one simultaneously valid) reference was used to locate the + // image, and unchanged by whether or how the layers are compressed. The result takes the form + // of the hexadecimal portion of a digest.Digest. + ImageID(diffIDs []digest.Digest) (string, error) + + // Inspect returns various information for (skopeo inspect) parsed from the manifest, + // incorporating information from a configuration blob returned by configGetter, if + // the underlying image format is expected to include a configuration blob. + Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) + + // Serialize returns the manifest in a blob format. + // NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! + Serialize() ([]byte, error) } // GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized. 
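// [Illustrative sketch, not part of the patch] The Manifest interface introduced above is intended to be usable without knowing the concrete manifest format. Assuming a raw manifest blob (for example obtained from ImageSource.GetManifest), a caller might parse it and list its layer digests roughly like this, using GuessMIMEType together with the FromBlob helper added in the next hunk:
//
//	func listLayers(blob []byte) ([]string, error) {
//		m, err := manifest.FromBlob(blob, manifest.GuessMIMEType(blob))
//		if err != nil {
//			return nil, err
//		}
//		digests := []string{}
//		for _, layer := range m.LayerInfos() {
//			digests = append(digests, layer.Digest.String())
//		}
//		return digests, nil
//	}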
@@ -142,3 +177,62 @@ func AddDummyV2S1Signature(manifest []byte) ([]byte, error) { } return js.PrettySignature("signatures") } + +// MIMETypeIsMultiImage returns true if mimeType is a list of images +func MIMETypeIsMultiImage(mimeType string) bool { + return mimeType == DockerV2ListMediaType +} + +// NormalizedMIMEType returns the effective MIME type of a manifest MIME type returned by a server, +// centralizing various workarounds. +func NormalizedMIMEType(input string) string { + switch input { + // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md . + // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might + // need to happen within the ImageSource. + case "application/json": + return DockerV2Schema1SignedMediaType + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType, + imgspecv1.MediaTypeImageManifest, + DockerV2Schema2MediaType, + DockerV2ListMediaType: + return input + default: + // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time + // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108 + // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50 + // + // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag. + // This makes no real sense, but it happens + // because requests for manifests are + // redirected to a content distribution + // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442 + return DockerV2Schema1SignedMediaType + } +} + +// FromBlob returns a Manifest instance for the specified manifest blob and the corresponding MIME type +func FromBlob(manblob []byte, mt string) (Manifest, error) { + switch NormalizedMIMEType(mt) { + case DockerV2Schema1MediaType, DockerV2Schema1SignedMediaType: + return Schema1FromManifest(manblob) + case imgspecv1.MediaTypeImageManifest: + return OCI1FromManifest(manblob) + case DockerV2Schema2MediaType: + return Schema2FromManifest(manblob) + case DockerV2ListMediaType: + return nil, fmt.Errorf("Treating manifest lists as individual manifests is not implemented") + default: // Note that this may not be reachable, NormalizedMIMEType has a default for unknown values. + return nil, fmt.Errorf("Unimplemented manifest MIME type %s", mt) + } +} + +// LayerInfosToStrings converts a list of layer infos, presumably obtained from a Manifest.LayerInfos() +// method call, into a format suitable for inclusion in a types.ImageInspectInfo structure. +func LayerInfosToStrings(infos []types.BlobInfo) []string { + layers := make([]string, len(infos)) + for i, info := range infos { + layers[i] = info.Digest.String() + } + return layers +} diff --git a/vendor/github.com/containers/image/manifest/oci.go b/vendor/github.com/containers/image/manifest/oci.go new file mode 100644 index 00000000..0ffb35b7 --- /dev/null +++ b/vendor/github.com/containers/image/manifest/oci.go @@ -0,0 +1,120 @@ +package manifest + +import ( + "encoding/json" + "time" + + "github.com/containers/image/types" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// OCI1 is a manifest.Manifest implementation for OCI images. 
+// The underlying data from imgspecv1.Manifest is also available. +type OCI1 struct { + imgspecv1.Manifest +} + +// OCI1FromManifest creates an OCI1 manifest instance from a manifest blob. +func OCI1FromManifest(manifest []byte) (*OCI1, error) { + oci1 := OCI1{} + if err := json.Unmarshal(manifest, &oci1); err != nil { + return nil, err + } + return &oci1, nil +} + +// OCI1FromComponents creates an OCI1 manifest instance from the supplied data. +func OCI1FromComponents(config imgspecv1.Descriptor, layers []imgspecv1.Descriptor) *OCI1 { + return &OCI1{ + imgspecv1.Manifest{ + Versioned: specs.Versioned{SchemaVersion: 2}, + Config: config, + Layers: layers, + }, + } +} + +// OCI1Clone creates a copy of the supplied OCI1 manifest. +func OCI1Clone(src *OCI1) *OCI1 { + return &OCI1{ + Manifest: src.Manifest, + } +} + +// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. +func (m *OCI1) ConfigInfo() types.BlobInfo { + return types.BlobInfo{Digest: m.Config.Digest, Size: m.Config.Size, Annotations: m.Config.Annotations} +} + +// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). +// The Digest field is guaranteed to be provided; Size may be -1. +// WARNING: The list may contain duplicates, and they are semantically relevant. +func (m *OCI1) LayerInfos() []types.BlobInfo { + blobs := []types.BlobInfo{} + for _, layer := range m.Layers { + blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size, Annotations: layer.Annotations, URLs: layer.URLs, MediaType: layer.MediaType}) + } + return blobs +} + +// UpdateLayerInfos replaces the original layers with the specified BlobInfos (size+digest+urls), in order (the root layer first, and then successive layered layers) +func (m *OCI1) UpdateLayerInfos(layerInfos []types.BlobInfo) error { + if len(m.Layers) != len(layerInfos) { + return errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(m.Layers), len(layerInfos)) + } + original := m.Layers + m.Layers = make([]imgspecv1.Descriptor, len(layerInfos)) + for i, info := range layerInfos { + m.Layers[i].MediaType = original[i].MediaType + m.Layers[i].Digest = info.Digest + m.Layers[i].Size = info.Size + m.Layers[i].Annotations = info.Annotations + m.Layers[i].URLs = info.URLs + } + return nil +} + +// Serialize returns the manifest in a blob format. +// NOTE: Serialize() does not in general reproduce the original blob if this object was loaded from one, even if no modifications were made! +func (m *OCI1) Serialize() ([]byte, error) { + return json.Marshal(*m) +} + +// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration. 
+func (m *OCI1) Inspect(configGetter func(types.BlobInfo) ([]byte, error)) (*types.ImageInspectInfo, error) { + config, err := configGetter(m.ConfigInfo()) + if err != nil { + return nil, err + } + v1 := &imgspecv1.Image{} + if err := json.Unmarshal(config, v1); err != nil { + return nil, err + } + d1 := &Schema2V1Image{} + json.Unmarshal(config, d1) + created := time.Time{} + if v1.Created != nil { + created = *v1.Created + } + i := &types.ImageInspectInfo{ + Tag: "", + Created: created, + DockerVersion: d1.DockerVersion, + Labels: v1.Config.Labels, + Architecture: v1.Architecture, + Os: v1.OS, + Layers: LayerInfosToStrings(m.LayerInfos()), + } + return i, nil +} + +// ImageID computes an ID which can uniquely identify this image by its contents. +func (m *OCI1) ImageID([]digest.Digest) (string, error) { + if err := m.Config.Digest.Validate(); err != nil { + return "", err + } + return m.Config.Digest.Hex(), nil +} diff --git a/vendor/github.com/containers/image/oci/archive/oci_src.go b/vendor/github.com/containers/image/oci/archive/oci_src.go index 8644202f..aee5d8d5 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_src.go +++ b/vendor/github.com/containers/image/oci/archive/oci_src.go @@ -68,14 +68,12 @@ func (s *ociArchiveImageSource) Close() error { return s.unpackedSrc.Close() } -// GetManifest returns the image's manifest along with its MIME type -// (which may be empty when it can't be determined but the manifest is available). -func (s *ociArchiveImageSource) GetManifest() ([]byte, string, error) { - return s.unpackedSrc.GetManifest() -} - -func (s *ociArchiveImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - return s.unpackedSrc.GetTargetManifest(digest) +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + return s.unpackedSrc.GetManifest(instanceDigest) } // GetBlob returns a stream for the specified blob, and the blob's size. @@ -83,6 +81,15 @@ func (s *ociArchiveImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int return s.unpackedSrc.GetBlob(info) } -func (s *ociArchiveImageSource) GetSignatures(c context.Context) ([][]byte, error) { - return s.unpackedSrc.GetSignatures(c) +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociArchiveImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + return s.unpackedSrc.GetSignatures(ctx, instanceDigest) +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (s *ociArchiveImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil } diff --git a/vendor/github.com/containers/image/oci/archive/oci_transport.go b/vendor/github.com/containers/image/oci/archive/oci_transport.go index 31b19198..c4a4fa71 100644 --- a/vendor/github.com/containers/image/oci/archive/oci_transport.go +++ b/vendor/github.com/containers/image/oci/archive/oci_transport.go @@ -4,13 +4,13 @@ import ( "fmt" "io/ioutil" "os" - "path/filepath" - "regexp" "strings" "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/internal/tmpdir" + "github.com/containers/image/oci/internal" ocilayout "github.com/containers/image/oci/layout" "github.com/containers/image/transports" "github.com/containers/image/types" @@ -48,51 +48,12 @@ func (t ociArchiveTransport) ParseReference(reference string) (types.ImageRefere // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys func (t ociArchiveTransport) ValidatePolicyConfigurationScope(scope string) error { - var file string - sep := strings.SplitN(scope, ":", 2) - file = sep[0] - - if len(sep) == 2 { - image := sep[1] - if !refRegexp.MatchString(image) { - return errors.Errorf("Invalid image %s", image) - } - } - - if !strings.HasPrefix(file, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?) - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(file) - if cleaned != file { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil + return internal.ValidateScope(scope) } -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. func ParseReference(reference string) (types.ImageReference, error) { - var file, image string - sep := strings.SplitN(reference, ":", 2) - file = sep[0] - - if len(sep) == 2 { - image = sep[1] - } + file, image := internal.SplitPathAndImage(reference) return NewReference(file, image) } @@ -102,14 +63,15 @@ func NewReference(file, image string) (types.ImageReference, error) { if err != nil { return nil, err } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. 
- if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", file, image, resolved) + + if err := internal.ValidateOCIPath(file); err != nil { + return nil, err } - if len(image) > 0 && !refRegexp.MatchString(image) { - return nil, errors.Errorf("Invalid image %s", image) + + if err := internal.ValidateImageName(image); err != nil { + return nil, err } + return ociArchiveReference{file: file, resolvedFile: resolved, image: image}, nil } @@ -154,14 +116,17 @@ func (ref ociArchiveReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. -func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociArchiveReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. @@ -194,7 +159,7 @@ func (t *tempDirOCIRef) deleteTempDir() error { // createOCIRef creates the oci reference of the image func createOCIRef(image string) (tempDirOCIRef, error) { - dir, err := ioutil.TempDir("/var/tmp", "oci") + dir, err := ioutil.TempDir(tmpdir.TemporaryDirectoryForBigFiles(), "oci") if err != nil { return tempDirOCIRef{}, errors.Wrapf(err, "error creating temp directory") } diff --git a/vendor/github.com/containers/image/oci/internal/oci_util.go b/vendor/github.com/containers/image/oci/internal/oci_util.go new file mode 100644 index 00000000..c2012e50 --- /dev/null +++ b/vendor/github.com/containers/image/oci/internal/oci_util.go @@ -0,0 +1,126 @@ +package internal + +import ( + "github.com/pkg/errors" + "path/filepath" + "regexp" + "runtime" + "strings" +) + +// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys +const ( + separator = `(?:[-._:@+]|--)` + alphanum = `(?:[A-Za-z0-9]+)` + component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` +) + +var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) +var windowsRefRegexp = regexp.MustCompile(`^([a-zA-Z]:\\.+?):(.*)$`) + +// ValidateImageName returns nil if the image name is empty or matches the open-containers image name specs. +// In any other case an error is returned. +func ValidateImageName(image string) error { + if len(image) == 0 { + return nil + } + + var err error + if !refRegexp.MatchString(image) { + err = errors.Errorf("Invalid image %s", image) + } + return err +} + +// SplitPathAndImage tries to split the provided OCI reference into the OCI path and image. +// Neither path nor image parts are validated at this stage. 
+func SplitPathAndImage(reference string) (string, string) { + if runtime.GOOS == "windows" { + return splitPathAndImageWindows(reference) + } + return splitPathAndImageNonWindows(reference) +} + +func splitPathAndImageWindows(reference string) (string, string) { + groups := windowsRefRegexp.FindStringSubmatch(reference) + // nil group means no match + if groups == nil { + return reference, "" + } + + // we expect three elements. First one full match, second the capture group for the path and + // the third the capture group for the image + if len(groups) != 3 { + return reference, "" + } + return groups[1], groups[2] +} + +func splitPathAndImageNonWindows(reference string) (string, string) { + sep := strings.SplitN(reference, ":", 2) + path := sep[0] + + var image string + if len(sep) == 2 { + image = sep[1] + } + return path, image +} + +// ValidateOCIPath takes the OCI path and validates it. +func ValidateOCIPath(path string) error { + if runtime.GOOS == "windows" { + // On Windows we must allow for a ':' as part of the path + if strings.Count(path, ":") > 1 { + return errors.Errorf("Invalid OCI reference: path %s contains more than one colon", path) + } + } else { + if strings.Contains(path, ":") { + return errors.Errorf("Invalid OCI reference: path %s contains a colon", path) + } + } + return nil +} + +// ValidateScope validates a policy configuration scope for an OCI transport. +func ValidateScope(scope string) error { + var err error + if runtime.GOOS == "windows" { + err = validateScopeWindows(scope) + } else { + err = validateScopeNonWindows(scope) + } + if err != nil { + return err + } + + cleaned := filepath.Clean(scope) + if cleaned != scope { + return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) + } + + return nil +} + +func validateScopeWindows(scope string) error { + matched, _ := regexp.Match(`^[a-zA-Z]:\\`, []byte(scope)) + if !matched { + return errors.Errorf("Invalid scope '%s'. Must be an absolute path", scope) + } + + return nil +} + +func validateScopeNonWindows(scope string) error { + if !strings.HasPrefix(scope, "/") { + return errors.Errorf("Invalid scope %s: must be an absolute path", scope) + } + + // Refuse also "/", otherwise "/" and "" would have the same semantics, + // and "" could be unexpectedly shadowed by the "/" entry. + if scope == "/" { + return errors.New(`Invalid scope "/": Use the generic default scope ""`) + } + + return nil +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_dest.go b/vendor/github.com/containers/image/oci/layout/oci_dest.go index ce1e0c3e..e95f6516 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_dest.go +++ b/vendor/github.com/containers/image/oci/layout/oci_dest.go @@ -18,21 +18,47 @@ import ( ) type ociImageDestination struct { - ref ociReference - index imgspecv1.Index + ref ociReference + index imgspecv1.Index + sharedBlobDir string } // newImageDestination returns an ImageDestination for writing to an existing directory. 
-func newImageDestination(ref ociReference) (types.ImageDestination, error) { +func newImageDestination(ctx *types.SystemContext, ref ociReference) (types.ImageDestination, error) { if ref.image == "" { return nil, errors.Errorf("cannot save image with empty image.ref.name") } - index := imgspecv1.Index{ - Versioned: imgspec.Versioned{ - SchemaVersion: 2, - }, + + var index *imgspecv1.Index + if indexExists(ref) { + var err error + index, err = ref.getIndex() + if err != nil { + return nil, err + } + } else { + index = &imgspecv1.Index{ + Versioned: imgspec.Versioned{ + SchemaVersion: 2, + }, + } } - return &ociImageDestination{ref: ref, index: index}, nil + + d := &ociImageDestination{ref: ref, index: *index} + if ctx != nil { + d.sharedBlobDir = ctx.OCISharedBlobDirPath + } + + if err := ensureDirectoryExists(d.ref.dir); err != nil { + return nil, err + } + // Per the OCI image specification, layouts MUST have a "blobs" subdirectory, + // but it MAY be empty (e.g. if we never end up calling PutBlob) + // https://github.com/opencontainers/image-spec/blame/7c889fafd04a893f5c5f50b7ab9963d5d64e5242/image-layout.md#L19 + if err := ensureDirectoryExists(filepath.Join(d.ref.dir, "blobs")); err != nil { + return nil, err + } + return d, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -81,16 +107,16 @@ func (d *ociImageDestination) MustMatchRuntimeOS() bool { // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) { - if err := ensureDirectoryExists(d.ref.dir); err != nil { - return types.BlobInfo{}, err - } blobFile, err := ioutil.TempFile(d.ref.dir, "oci-put-blob") if err != nil { return types.BlobInfo{}, err } succeeded := false + explicitClosed := false defer func() { - blobFile.Close() + if !explicitClosed { + blobFile.Close() + } if !succeeded { os.Remove(blobFile.Name()) } @@ -110,17 +136,28 @@ func (d *ociImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo if err := blobFile.Sync(); err != nil { return types.BlobInfo{}, err } - if err := blobFile.Chmod(0644); err != nil { - return types.BlobInfo{}, err + + // On POSIX systems, blobFile was created with mode 0600, so we need to make it readable. + // On Windows, the “permissions of newly created files” argument to syscall.Open is + // ignored and the file is already readable; besides, blobFile.Chmod, i.e. syscall.Fchmod, + // always fails on Windows. 
+ if runtime.GOOS != "windows" { + if err := blobFile.Chmod(0644); err != nil { + return types.BlobInfo{}, err + } } - blobPath, err := d.ref.blobPath(computedDigest) + blobPath, err := d.ref.blobPath(computedDigest, d.sharedBlobDir) if err != nil { return types.BlobInfo{}, err } if err := ensureParentDirectoryExists(blobPath); err != nil { return types.BlobInfo{}, err } + + // need to explicitly close the file, since a rename won't otherwise not work on Windows + blobFile.Close() + explicitClosed = true if err := os.Rename(blobFile.Name(), blobPath); err != nil { return types.BlobInfo{}, err } @@ -136,7 +173,7 @@ func (d *ociImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) if info.Digest == "" { return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`) } - blobPath, err := d.ref.blobPath(info.Digest) + blobPath, err := d.ref.blobPath(info.Digest, d.sharedBlobDir) if err != nil { return false, -1, err } @@ -169,7 +206,7 @@ func (d *ociImageDestination) PutManifest(m []byte) error { desc.MediaType = imgspecv1.MediaTypeImageManifest desc.Size = int64(len(m)) - blobPath, err := d.ref.blobPath(digest) + blobPath, err := d.ref.blobPath(digest, d.sharedBlobDir) if err != nil { return err } @@ -191,23 +228,20 @@ func (d *ociImageDestination) PutManifest(m []byte) error { Architecture: runtime.GOARCH, OS: runtime.GOOS, } - d.index.Manifests = append(d.index.Manifests, desc) + d.addManifest(&desc) return nil } -func ensureDirectoryExists(path string) error { - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - if err := os.MkdirAll(path, 0755); err != nil { - return err +func (d *ociImageDestination) addManifest(desc *imgspecv1.Descriptor) { + for i, manifest := range d.index.Manifests { + if manifest.Annotations["org.opencontainers.image.ref.name"] == desc.Annotations["org.opencontainers.image.ref.name"] { + // TODO Should there first be a cleanup based on the descriptor we are going to replace? + d.index.Manifests[i] = *desc + return } } - return nil -} - -// ensureParentDirectoryExists ensures the parent of the supplied path exists. -func ensureParentDirectoryExists(path string) error { - return ensureDirectoryExists(filepath.Dir(path)) + d.index.Manifests = append(d.index.Manifests, *desc) } func (d *ociImageDestination) PutSignatures(signatures [][]byte) error { @@ -231,3 +265,30 @@ func (d *ociImageDestination) Commit() error { } return ioutil.WriteFile(d.ref.indexPath(), indexJSON, 0644) } + +func ensureDirectoryExists(path string) error { + if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { + if err := os.MkdirAll(path, 0755); err != nil { + return err + } + } + return nil +} + +// ensureParentDirectoryExists ensures the parent of the supplied path exists. +func ensureParentDirectoryExists(path string) error { + return ensureDirectoryExists(filepath.Dir(path)) +} + +// indexExists checks whether the index location specified in the OCI reference exists. 
+// The implementation is opinionated, since in case of unexpected errors false is returned +func indexExists(ref ociReference) bool { + _, err := os.Stat(ref.indexPath()) + if err == nil { + return true + } + if os.IsNotExist(err) { + return false + } + return true +} diff --git a/vendor/github.com/containers/image/oci/layout/oci_src.go b/vendor/github.com/containers/image/oci/layout/oci_src.go index be8a2aa7..1109f65c 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_src.go +++ b/vendor/github.com/containers/image/oci/layout/oci_src.go @@ -17,9 +17,10 @@ import ( ) type ociImageSource struct { - ref ociReference - descriptor imgspecv1.Descriptor - client *http.Client + ref ociReference + descriptor imgspecv1.Descriptor + client *http.Client + sharedBlobDir string } // newImageSource returns an ImageSource for reading from an existing directory. @@ -40,7 +41,12 @@ func newImageSource(ctx *types.SystemContext, ref ociReference) (types.ImageSour if err != nil { return nil, err } - return &ociImageSource{ref: ref, descriptor: descriptor, client: client}, nil + d := &ociImageSource{ref: ref, descriptor: descriptor, client: client} + if ctx != nil { + // TODO(jonboulle): check dir existence? + d.sharedBlobDir = ctx.OCISharedBlobDirPath + } + return d, nil } // Reference returns the reference used to set up this source. @@ -55,8 +61,26 @@ func (s *ociImageSource) Close() error { // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *ociImageSource) GetManifest() ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest.Digest(s.descriptor.Digest)) +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + var dig digest.Digest + var mimeType string + if instanceDigest == nil { + dig = digest.Digest(s.descriptor.Digest) + mimeType = s.descriptor.MediaType + } else { + dig = *instanceDigest + // XXX: instanceDigest means that we don't immediately have the context of what + // mediaType the manifest has. In OCI this means that we don't know + // what reference it came from, so we just *assume* that its + // MediaTypeImageManifest. + // FIXME: We should actually be able to look up the manifest in the index, + // and see the MIME type there. + mimeType = imgspecv1.MediaTypeImageManifest + } + + manifestPath, err := s.ref.blobPath(dig, s.sharedBlobDir) if err != nil { return nil, "", err } @@ -65,25 +89,7 @@ func (s *ociImageSource) GetManifest() ([]byte, string, error) { return nil, "", err } - return m, s.descriptor.MediaType, nil -} - -func (s *ociImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - manifestPath, err := s.ref.blobPath(digest) - if err != nil { - return nil, "", err - } - - m, err := ioutil.ReadFile(manifestPath) - if err != nil { - return nil, "", err - } - - // XXX: GetTargetManifest means that we don't have the context of what - // mediaType the manifest has. In OCI this means that we don't know - // what reference it came from, so we just *assume* that its - // MediaTypeImageManifest. 
- return m, imgspecv1.MediaTypeImageManifest, nil + return m, mimeType, nil } // GetBlob returns a stream for the specified blob, and the blob's size. @@ -92,7 +98,7 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return s.getExternalBlob(info.URLs) } - path, err := s.ref.blobPath(info.Digest) + path, err := s.ref.blobPath(info.Digest, s.sharedBlobDir) if err != nil { return nil, 0, err } @@ -108,7 +114,11 @@ func (s *ociImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, err return r, fi.Size(), nil } -func (s *ociImageSource) GetSignatures(context.Context) ([][]byte, error) { +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *ociImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { return [][]byte{}, nil } @@ -133,6 +143,11 @@ func (s *ociImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, e return nil, 0, errWrap } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *ociImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + func getBlobSize(resp *http.Response) int64 { size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64) if err != nil { diff --git a/vendor/github.com/containers/image/oci/layout/oci_transport.go b/vendor/github.com/containers/image/oci/layout/oci_transport.go index 312bc0e4..c181c4c7 100644 --- a/vendor/github.com/containers/image/oci/layout/oci_transport.go +++ b/vendor/github.com/containers/image/oci/layout/oci_transport.go @@ -5,12 +5,12 @@ import ( "fmt" "os" "path/filepath" - "regexp" "strings" "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/oci/internal" "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/opencontainers/go-digest" @@ -36,45 +36,12 @@ func (t ociTransport) ParseReference(reference string) (types.ImageReference, er return ParseReference(reference) } -// annotation spex from https://github.com/opencontainers/image-spec/blob/master/annotations.md#pre-defined-annotation-keys -const ( - separator = `(?:[-._:@+]|--)` - alphanum = `(?:[A-Za-z0-9]+)` - component = `(?:` + alphanum + `(?:` + separator + alphanum + `)*)` -) - -var refRegexp = regexp.MustCompile(`^` + component + `(?:/` + component + `)*$`) - // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value). // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion. // scope passed to this function will not be "", that value is always allowed. 
func (t ociTransport) ValidatePolicyConfigurationScope(scope string) error { - var dir string - sep := strings.SplitN(scope, ":", 2) - dir = sep[0] - - if len(sep) == 2 { - image := sep[1] - if !refRegexp.MatchString(image) { - return errors.Errorf("Invalid image %s", image) - } - } - - if !strings.HasPrefix(dir, "/") { - return errors.Errorf("Invalid scope %s: must be an absolute path", scope) - } - // Refuse also "/", otherwise "/" and "" would have the same semantics, - // and "" could be unexpectedly shadowed by the "/" entry. - // (Note: we do allow "/:someimage", a bit ridiculous but why refuse it?) - if scope == "/" { - return errors.New(`Invalid scope "/": Use the generic default scope ""`) - } - cleaned := filepath.Clean(dir) - if cleaned != dir { - return errors.Errorf(`Invalid scope %s: Uses non-canonical path format, perhaps try with path %s`, scope, cleaned) - } - return nil + return internal.ValidateScope(scope) } // ociReference is an ImageReference for OCI directory paths. @@ -92,13 +59,7 @@ type ociReference struct { // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an OCI ImageReference. func ParseReference(reference string) (types.ImageReference, error) { - var dir, image string - sep := strings.SplitN(reference, ":", 2) - dir = sep[0] - - if len(sep) == 2 { - image = sep[1] - } + dir, image := internal.SplitPathAndImage(reference) return NewReference(dir, image) } @@ -111,14 +72,15 @@ func NewReference(dir, image string) (types.ImageReference, error) { if err != nil { return nil, err } - // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces - // from being ambiguous with values of PolicyConfigurationIdentity. - if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OCI reference %s:%s: path %s contains a colon", dir, image, resolved) + + if err := internal.ValidateOCIPath(dir); err != nil { + return nil, err } - if len(image) > 0 && !refRegexp.MatchString(image) { - return nil, errors.Errorf("Invalid image %s", image) + + if err = internal.ValidateImageName(image); err != nil { + return nil, err } + return ociReference{dir: dir, resolvedDir: resolved, image: image}, nil } @@ -177,28 +139,40 @@ func (ref ociReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ociReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref ociReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return image.FromSource(src) + return image.FromSource(ctx, src) +} + +// getIndex returns a pointer to the index references by this ociReference. If an error occurs opening an index nil is returned together +// with an error. 
+func (ref ociReference) getIndex() (*imgspecv1.Index, error) { + indexJSON, err := os.Open(ref.indexPath()) + if err != nil { + return nil, err + } + defer indexJSON.Close() + + index := &imgspecv1.Index{} + if err := json.NewDecoder(indexJSON).Decode(index); err != nil { + return nil, err + } + return index, nil } func (ref ociReference) getManifestDescriptor() (imgspecv1.Descriptor, error) { - indexJSON, err := os.Open(ref.indexPath()) + index, err := ref.getIndex() if err != nil { return imgspecv1.Descriptor{}, err } - defer indexJSON.Close() - index := imgspecv1.Index{} - if err := json.NewDecoder(indexJSON).Decode(&index); err != nil { - return imgspecv1.Descriptor{}, err - } var d *imgspecv1.Descriptor if ref.image == "" { @@ -250,7 +224,7 @@ func (ref ociReference) NewImageSource(ctx *types.SystemContext) (types.ImageSou // NewImageDestination returns a types.ImageDestination for this reference. // The caller must call .Close() on the returned ImageDestination. func (ref ociReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(ref) + return newImageDestination(ctx, ref) } // DeleteImage deletes the named image from the registry, if supported. @@ -269,9 +243,13 @@ func (ref ociReference) indexPath() string { } // blobPath returns a path for a blob within a directory using OCI image-layout conventions. -func (ref ociReference) blobPath(digest digest.Digest) (string, error) { +func (ref ociReference) blobPath(digest digest.Digest, sharedBlobDir string) (string, error) { if err := digest.Validate(); err != nil { return "", errors.Wrapf(err, "unexpected digest reference %s", digest) } - return filepath.Join(ref.dir, "blobs", digest.Algorithm().String(), digest.Hex()), nil + blobDir := filepath.Join(ref.dir, "blobs") + if sharedBlobDir != "" { + blobDir = sharedBlobDir + } + return filepath.Join(blobDir, digest.Algorithm().String(), digest.Hex()), nil } diff --git a/vendor/github.com/containers/image/openshift/openshift.go b/vendor/github.com/containers/image/openshift/openshift.go index 0117f2e0..54655914 100644 --- a/vendor/github.com/containers/image/openshift/openshift.go +++ b/vendor/github.com/containers/image/openshift/openshift.go @@ -200,20 +200,15 @@ func (s *openshiftImageSource) Close() error { return nil } -func (s *openshiftImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { - if err := s.ensureImageIsResolved(context.TODO()); err != nil { - return nil, "", err - } - return s.docker.GetTargetManifest(digest) -} - // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. -func (s *openshiftImageSource) GetManifest() ([]byte, string, error) { +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { if err := s.ensureImageIsResolved(context.TODO()); err != nil { return nil, "", err } - return s.docker.GetManifest() + return s.docker.GetManifest(instanceDigest) } // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). 
@@ -224,12 +219,21 @@ func (s *openshiftImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int6 return s.docker.GetBlob(info) } -func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, error) { - if err := s.ensureImageIsResolved(ctx); err != nil { - return nil, err +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (s *openshiftImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + var imageName string + if instanceDigest == nil { + if err := s.ensureImageIsResolved(ctx); err != nil { + return nil, err + } + imageName = s.imageStreamImageName + } else { + imageName = instanceDigest.String() } - - image, err := s.client.getImage(ctx, s.imageStreamImageName) + image, err := s.client.getImage(ctx, imageName) if err != nil { return nil, err } @@ -242,6 +246,11 @@ func (s *openshiftImageSource) GetSignatures(ctx context.Context) ([][]byte, err return sigs, nil } +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *openshiftImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} + // ensureImageIsResolved sets up s.docker and s.imageStreamImageName func (s *openshiftImageSource) ensureImageIsResolved(ctx context.Context) error { if s.docker != nil { diff --git a/vendor/github.com/containers/image/openshift/openshift_transport.go b/vendor/github.com/containers/image/openshift/openshift_transport.go index 7db35d96..686d806f 100644 --- a/vendor/github.com/containers/image/openshift/openshift_transport.go +++ b/vendor/github.com/containers/image/openshift/openshift_transport.go @@ -125,16 +125,17 @@ func (ref openshiftReference) PolicyConfigurationNamespaces() []string { return policyconfiguration.DockerReferenceNamespaces(ref.dockerReference) } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. -// The caller must call .Close() on the returned Image. +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.Image, error) { +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (ref openshiftReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { src, err := newImageSource(ctx, ref) if err != nil { return nil, err } - return genericImage.FromSource(src) + return genericImage.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. 
diff --git a/vendor/github.com/containers/image/ostree/ostree_dest.go b/vendor/github.com/containers/image/ostree/ostree_dest.go index 26137431..704e1ece 100644 --- a/vendor/github.com/containers/image/ostree/ostree_dest.go +++ b/vendor/github.com/containers/image/ostree/ostree_dest.go @@ -4,6 +4,8 @@ package ostree import ( "bytes" + "compress/gzip" + "encoding/base64" "encoding/json" "fmt" "io" @@ -12,18 +14,27 @@ import ( "os/exec" "path/filepath" "strconv" - "strings" "time" "github.com/containers/image/manifest" "github.com/containers/image/types" "github.com/containers/storage/pkg/archive" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" - "github.com/ostreedev/ostree-go/pkg/otbuiltin" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" ) +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + type blobToImport struct { Size int64 Digest digest.Digest @@ -35,18 +46,24 @@ type descriptor struct { Digest digest.Digest `json:"digest"` } +type fsLayersSchema1 struct { + BlobSum digest.Digest `json:"blobSum"` +} + type manifestSchema struct { - ConfigDescriptor descriptor `json:"config"` - LayersDescriptors []descriptor `json:"layers"` + LayersDescriptors []descriptor `json:"layers"` + FSLayers []fsLayersSchema1 `json:"fsLayers"` } type ostreeImageDestination struct { - ref ostreeReference - manifest string - schema manifestSchema - tmpDirPath string - blobs map[string]*blobToImport - digest digest.Digest + ref ostreeReference + manifest string + schema manifestSchema + tmpDirPath string + blobs map[string]*blobToImport + digest digest.Digest + signaturesLen int + repo *C.struct_OstreeRepo } // newImageDestination returns an ImageDestination for writing to an existing ostree. @@ -55,7 +72,7 @@ func newImageDestination(ref ostreeReference, tmpDirPath string) (types.ImageDes if err := ensureDirectoryExists(tmpDirPath); err != nil { return nil, err } - return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, ""}, nil + return &ostreeImageDestination{ref, "", manifestSchema{}, tmpDirPath, map[string]*blobToImport{}, "", 0, nil}, nil } // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent, @@ -66,6 +83,9 @@ func (d *ostreeImageDestination) Reference() types.ImageReference { // Close removes resources associated with an initialized ImageDestination, if any. 
func (d *ostreeImageDestination) Close() error { + if d.repo != nil { + C.g_object_unref(C.gpointer(d.repo)) + } return os.RemoveAll(d.tmpDirPath) } @@ -174,6 +194,35 @@ func (d *ostreeImageDestination) ostreeCommit(repo *otbuiltin.Repo, branch strin return err } +func generateTarSplitMetadata(output *bytes.Buffer, file string) error { + mfz := gzip.NewWriter(output) + defer mfz.Close() + metaPacker := storage.NewJSONPacker(mfz) + + stream, err := os.OpenFile(file, os.O_RDONLY, 0) + if err != nil { + return err + } + defer stream.Close() + + gzReader, err := gzip.NewReader(stream) + if err != nil { + return err + } + defer gzReader.Close() + + its, err := asm.NewInputTarStream(gzReader, metaPacker, nil) + if err != nil { + return err + } + + _, err = io.Copy(ioutil.Discard, its) + if err != nil { + return err + } + return nil +} + func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToImport) error { ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) destinationPath := filepath.Join(d.tmpDirPath, blob.Digest.Hex(), "root") @@ -185,6 +234,11 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm os.RemoveAll(destinationPath) }() + var tarSplitOutput bytes.Buffer + if err := generateTarSplitMetadata(&tarSplitOutput, blob.BlobPath); err != nil { + return err + } + if os.Getuid() == 0 { if err := archive.UntarPath(blob.BlobPath, destinationPath); err != nil { return err @@ -202,28 +256,35 @@ func (d *ostreeImageDestination) importBlob(repo *otbuiltin.Repo, blob *blobToIm return err } } + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size), + fmt.Sprintf("tarsplit.output=%s", base64.StdEncoding.EncodeToString(tarSplitOutput.Bytes()))}) + +} + +func (d *ostreeImageDestination) importConfig(repo *otbuiltin.Repo, blob *blobToImport) error { + ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) + destinationPath := filepath.Dir(blob.BlobPath) + return d.ostreeCommit(repo, ostreeBranch, destinationPath, []string{fmt.Sprintf("docker.size=%d", blob.Size)}) } -func (d *ostreeImageDestination) importConfig(blob *blobToImport) error { - ostreeBranch := fmt.Sprintf("ociimage/%s", blob.Digest.Hex()) - - return exec.Command("ostree", "commit", - "--repo", d.ref.repo, - fmt.Sprintf("--add-metadata-string=docker.size=%d", blob.Size), - "--branch", ostreeBranch, filepath.Dir(blob.BlobPath)).Run() -} - func (d *ostreeImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) { - branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) - output, err := exec.Command("ostree", "show", "--repo", d.ref.repo, "--print-metadata-key=docker.size", branch).CombinedOutput() - if err != nil { - if bytes.Index(output, []byte("not found")) >= 0 || bytes.Index(output, []byte("No such")) >= 0 { - return false, -1, nil + + if d.repo == nil { + repo, err := openRepo(d.ref.repo) + if err != nil { + return false, 0, err } - return false, -1, err + d.repo = repo } - size, err := strconv.ParseInt(strings.Trim(string(output), "'\n"), 10, 64) + branch := fmt.Sprintf("ociimage/%s", info.Digest.Hex()) + + found, data, err := readMetadata(d.repo, branch, "docker.size") + if err != nil || !found { + return found, -1, err + } + + size, err := strconv.ParseInt(data, 10, 64) if err != nil { return false, -1, err } @@ -272,6 +333,7 @@ func (d *ostreeImageDestination) PutSignatures(signatures [][]byte) error { return err } } + d.signaturesLen = len(signatures) return nil } @@ -286,24 +348,37 @@ func (d 
*ostreeImageDestination) Commit() error { return err } - for _, layer := range d.schema.LayersDescriptors { - hash := layer.Digest.Hex() + checkLayer := func(hash string) error { blob := d.blobs[hash] // if the blob is not present in d.blobs then it is already stored in OSTree, // and we don't need to import it. if blob == nil { - continue + return nil } err := d.importBlob(repo, blob) if err != nil { return err } + + delete(d.blobs, hash) + return nil + } + for _, layer := range d.schema.LayersDescriptors { + hash := layer.Digest.Hex() + if err = checkLayer(hash); err != nil { + return err + } + } + for _, layer := range d.schema.FSLayers { + hash := layer.BlobSum.Hex() + if err = checkLayer(hash); err != nil { + return err + } } - hash := d.schema.ConfigDescriptor.Digest.Hex() - blob := d.blobs[hash] - if blob != nil { - err := d.importConfig(blob) + // Import the other blobs that are not layers + for _, blob := range d.blobs { + err := d.importConfig(repo, blob) if err != nil { return err } @@ -311,7 +386,9 @@ func (d *ostreeImageDestination) Commit() error { manifestPath := filepath.Join(d.tmpDirPath, "manifest") - metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), fmt.Sprintf("docker.digest=%s", string(d.digest))} + metadata := []string{fmt.Sprintf("docker.manifest=%s", string(d.manifest)), + fmt.Sprintf("signatures=%d", d.signaturesLen), + fmt.Sprintf("docker.digest=%s", string(d.digest))} err = d.ostreeCommit(repo, fmt.Sprintf("ociimage/%s", d.ref.branchName), manifestPath, metadata) _, err = repo.CommitTransaction() diff --git a/vendor/github.com/containers/image/ostree/ostree_src.go b/vendor/github.com/containers/image/ostree/ostree_src.go new file mode 100644 index 00000000..c65a07b7 --- /dev/null +++ b/vendor/github.com/containers/image/ostree/ostree_src.go @@ -0,0 +1,354 @@ +// +build !containers_image_ostree_stub + +package ostree + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/base64" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "unsafe" + + "github.com/containers/image/manifest" + "github.com/containers/image/types" + "github.com/containers/storage/pkg/ioutils" + "github.com/opencontainers/go-digest" + glib "github.com/ostreedev/ostree-go/pkg/glibobject" + "github.com/pkg/errors" + "github.com/vbatts/tar-split/tar/asm" + "github.com/vbatts/tar-split/tar/storage" +) + +// #cgo pkg-config: glib-2.0 gobject-2.0 ostree-1 +// #include +// #include +// #include +// #include +// #include +// #include +import "C" + +type ostreeImageSource struct { + ref ostreeReference + tmpDir string + repo *C.struct_OstreeRepo +} + +// newImageSource returns an ImageSource for reading from an existing directory. +func newImageSource(ctx *types.SystemContext, tmpDir string, ref ostreeReference) (types.ImageSource, error) { + return &ostreeImageSource{ref: ref, tmpDir: tmpDir}, nil +} + +// Reference returns the reference used to set up this source. +func (s *ostreeImageSource) Reference() types.ImageReference { + return s.ref +} + +// Close removes resources associated with an initialized ImageSource, if any. 
+func (s *ostreeImageSource) Close() error { + if s.repo != nil { + C.g_object_unref(C.gpointer(s.repo)) + } + return nil +} + +func (s *ostreeImageSource) getLayerSize(blob string) (int64, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, data, err := readMetadata(s.repo, b, "docker.size") + if err != nil || !found { + return 0, err + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getLenSignatures() (int64, error) { + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, data, err := readMetadata(s.repo, b, "signatures") + if err != nil { + return -1, err + } + if !found { + // if 'signatures' is not present, just return 0 signatures. + return 0, nil + } + return strconv.ParseInt(data, 10, 64) +} + +func (s *ostreeImageSource) getTarSplitData(blob string) ([]byte, error) { + b := fmt.Sprintf("ociimage/%s", blob) + found, out, err := readMetadata(s.repo, b, "tarsplit.output") + if err != nil || !found { + return nil, err + } + return base64.StdEncoding.DecodeString(out) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +func (s *ostreeImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", errors.Errorf(`Manifest lists are not supported by "ostree:"`) + } + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, "", err + } + s.repo = repo + } + + b := fmt.Sprintf("ociimage/%s", s.ref.branchName) + found, out, err := readMetadata(s.repo, b, "docker.manifest") + if err != nil { + return nil, "", err + } + if !found { + return nil, "", errors.New("manifest not found") + } + m := []byte(out) + return m, manifest.GuessMIMEType(m), nil +} + +func (s *ostreeImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) { + return nil, "", errors.New("manifest lists are not supported by this transport") +} + +func openRepo(path string) (*C.struct_OstreeRepo, error) { + var cerr *C.GError + cpath := C.CString(path) + defer C.free(unsafe.Pointer(cpath)) + pathc := C.g_file_new_for_path(cpath) + defer C.g_object_unref(C.gpointer(pathc)) + repo := C.ostree_repo_new(pathc) + r := glib.GoBool(glib.GBoolean(C.ostree_repo_open(repo, nil, &cerr))) + if !r { + C.g_object_unref(C.gpointer(repo)) + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + return repo, nil +} + +type ostreePathFileGetter struct { + repo *C.struct_OstreeRepo + parentRoot *C.GFile +} + +type ostreeReader struct { + stream *C.GFileInputStream +} + +func (o ostreeReader) Close() error { + C.g_object_unref(C.gpointer(o.stream)) + return nil +} +func (o ostreeReader) Read(p []byte) (int, error) { + var cerr *C.GError + instanceCast := C.g_type_check_instance_cast((*C.GTypeInstance)(unsafe.Pointer(o.stream)), C.g_input_stream_get_type()) + stream := (*C.GInputStream)(unsafe.Pointer(instanceCast)) + + b := C.g_input_stream_read_bytes(stream, (C.gsize)(cap(p)), nil, &cerr) + if b == nil { + return 0, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_bytes_unref(b) + + count := int(C.g_bytes_get_size(b)) + if count == 0 { + return 0, io.EOF + } + data := (*[1 << 30]byte)(unsafe.Pointer(C.g_bytes_get_data(b, nil)))[:count:count] + copy(p, data) + return count, nil +} + +func readMetadata(repo *C.struct_OstreeRepo, commit, key string) (bool, string, error) { + var cerr *C.GError + var ref *C.char + 
defer C.free(unsafe.Pointer(ref)) + + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + + if !glib.GoBool(glib.GBoolean(C.ostree_repo_resolve_rev(repo, cCommit, C.gboolean(1), &ref, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + if ref == nil { + return false, "", nil + } + + var variant *C.GVariant + if !glib.GoBool(glib.GBoolean(C.ostree_repo_load_variant(repo, C.OSTREE_OBJECT_TYPE_COMMIT, ref, &variant, &cerr))) { + return false, "", glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + defer C.g_variant_unref(variant) + if variant != nil { + cKey := C.CString(key) + defer C.free(unsafe.Pointer(cKey)) + + metadata := C.g_variant_get_child_value(variant, 0) + defer C.g_variant_unref(metadata) + + data := C.g_variant_lookup_value(metadata, (*C.gchar)(cKey), nil) + if data != nil { + defer C.g_variant_unref(data) + ptr := (*C.char)(C.g_variant_get_string(data, nil)) + val := C.GoString(ptr) + return true, val, nil + } + } + return false, "", nil +} + +func newOSTreePathFileGetter(repo *C.struct_OstreeRepo, commit string) (*ostreePathFileGetter, error) { + var cerr *C.GError + var parentRoot *C.GFile + cCommit := C.CString(commit) + defer C.free(unsafe.Pointer(cCommit)) + if !glib.GoBool(glib.GBoolean(C.ostree_repo_read_commit(repo, cCommit, &parentRoot, nil, nil, &cerr))) { + return &ostreePathFileGetter{}, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + C.g_object_ref(C.gpointer(repo)) + + return &ostreePathFileGetter{repo: repo, parentRoot: parentRoot}, nil +} + +func (o ostreePathFileGetter) Get(filename string) (io.ReadCloser, error) { + var file *C.GFile + if strings.HasPrefix(filename, "./") { + filename = filename[2:] + } + cfilename := C.CString(filename) + defer C.free(unsafe.Pointer(cfilename)) + + file = (*C.GFile)(C.g_file_resolve_relative_path(o.parentRoot, cfilename)) + + var cerr *C.GError + stream := C.g_file_read(file, nil, &cerr) + if stream == nil { + return nil, glib.ConvertGError(glib.ToGError(unsafe.Pointer(cerr))) + } + + return &ostreeReader{stream: stream}, nil +} + +func (o ostreePathFileGetter) Close() { + C.g_object_unref(C.gpointer(o.repo)) + C.g_object_unref(C.gpointer(o.parentRoot)) +} + +func (s *ostreeImageSource) readSingleFile(commit, path string) (io.ReadCloser, error) { + getter, err := newOSTreePathFileGetter(s.repo, commit) + if err != nil { + return nil, err + } + defer getter.Close() + + return getter.Get(path) +} + +// GetBlob returns a stream for the specified blob, and the blob's size. +func (s *ostreeImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) { + blob := info.Digest.Hex() + branch := fmt.Sprintf("ociimage/%s", blob) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, 0, err + } + s.repo = repo + } + + layerSize, err := s.getLayerSize(blob) + if err != nil { + return nil, 0, err + } + + tarsplit, err := s.getTarSplitData(blob) + if err != nil { + return nil, 0, err + } + + // if tarsplit is nil we are looking at the manifest. 
Return directly the file in /content + if tarsplit == nil { + file, err := s.readSingleFile(branch, "/content") + if err != nil { + return nil, 0, err + } + return file, layerSize, nil + } + + mf := bytes.NewReader(tarsplit) + mfz, err := gzip.NewReader(mf) + if err != nil { + return nil, 0, err + } + defer mfz.Close() + metaUnpacker := storage.NewJSONUnpacker(mfz) + + getter, err := newOSTreePathFileGetter(s.repo, branch) + if err != nil { + return nil, 0, err + } + + ots := asm.NewOutputTarStream(getter, metaUnpacker) + + pipeReader, pipeWriter := io.Pipe() + go func() { + io.Copy(pipeWriter, ots) + pipeWriter.Close() + }() + + rc := ioutils.NewReadCloserWrapper(pipeReader, func() error { + getter.Close() + return ots.Close() + }) + return rc, layerSize, nil +} + +func (s *ostreeImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, errors.New("manifest lists are not supported by this transport") + } + lenSignatures, err := s.getLenSignatures() + if err != nil { + return nil, err + } + branch := fmt.Sprintf("ociimage/%s", s.ref.branchName) + + if s.repo == nil { + repo, err := openRepo(s.ref.repo) + if err != nil { + return nil, err + } + s.repo = repo + } + + signatures := [][]byte{} + for i := int64(1); i <= lenSignatures; i++ { + sigReader, err := s.readSingleFile(branch, fmt.Sprintf("/signature-%d", i)) + if err != nil { + return nil, err + } + defer sigReader.Close() + + sig, err := ioutil.ReadAll(sigReader) + if err != nil { + return nil, err + } + signatures = append(signatures, sig) + } + return signatures, nil +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. +func (s *ostreeImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/ostree/ostree_transport.go b/vendor/github.com/containers/image/ostree/ostree_transport.go index 0de74a71..cc85a43f 100644 --- a/vendor/github.com/containers/image/ostree/ostree_transport.go +++ b/vendor/github.com/containers/image/ostree/ostree_transport.go @@ -10,12 +10,12 @@ import ( "regexp" "strings" - "github.com/pkg/errors" - "github.com/containers/image/directory/explicitfilepath" "github.com/containers/image/docker/reference" + "github.com/containers/image/image" "github.com/containers/image/transports" "github.com/containers/image/types" + "github.com/pkg/errors" ) const defaultOSTreeRepo = "/ostree/repo" @@ -66,6 +66,11 @@ type ostreeReference struct { repo string } +type ostreeImageCloser struct { + types.ImageCloser + size int64 +} + func (t ostreeTransport) ParseReference(ref string) (types.ImageReference, error) { var repo = "" var image = "" @@ -110,7 +115,7 @@ func NewReference(image string, repo string) (types.ImageReference, error) { // This is necessary to prevent directory paths returned by PolicyConfigurationNamespaces // from being ambiguous with values of PolicyConfigurationIdentity. if strings.Contains(resolved, ":") { - return nil, errors.Errorf("Invalid OSTreeCI reference %s@%s: path %s contains a colon", image, repo, resolved) + return nil, errors.Errorf("Invalid OSTree reference %s@%s: path %s contains a colon", image, repo, resolved) } return ostreeReference{ @@ -168,18 +173,38 @@ func (ref ostreeReference) PolicyConfigurationNamespaces() []string { return res } -// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. 
-// The caller must call .Close() on the returned Image. +func (s *ostreeImageCloser) Size() (int64, error) { + return s.size, nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. -func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return nil, errors.New("Reading ostree: images is currently not supported") +func (ref ostreeReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + var tmpDir string + if ctx == nil || ctx.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = ctx.OSTreeTmpDirPath + } + src, err := newImageSource(ctx, tmpDir, ref) + if err != nil { + return nil, err + } + return image.FromSource(ctx, src) } // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. func (ref ostreeReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { - return nil, errors.New("Reading ostree: images is currently not supported") + var tmpDir string + if ctx == nil || ctx.OSTreeTmpDirPath == "" { + tmpDir = os.TempDir() + } else { + tmpDir = ctx.OSTreeTmpDirPath + } + return newImageSource(ctx, tmpDir, ref) } // NewImageDestination returns a types.ImageDestination for this reference. diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go index bc6c5e9a..42cc12ab 100644 --- a/vendor/github.com/containers/image/signature/policy_config.go +++ b/vendor/github.com/containers/image/signature/policy_config.go @@ -70,7 +70,11 @@ func NewPolicyFromFile(fileName string) (*Policy, error) { if err != nil { return nil, err } - return NewPolicyFromBytes(contents) + policy, err := NewPolicyFromBytes(contents) + if err != nil { + return nil, errors.Wrapf(err, "invalid policy in %q", fileName) + } + return policy, nil } // NewPolicyFromBytes returns a policy parsed from the specified blob. diff --git a/vendor/github.com/containers/image/storage/storage_image.go b/vendor/github.com/containers/image/storage/storage_image.go index 08fa71b5..038195c1 100644 --- a/vendor/github.com/containers/image/storage/storage_image.go +++ b/vendor/github.com/containers/image/storage/storage_image.go @@ -1,14 +1,17 @@ +// +build !containers_image_storage_stub + package storage import ( "bytes" "context" "encoding/json" + "fmt" "io" "io/ioutil" - "time" - - "github.com/pkg/errors" + "os" + "path/filepath" + "sync/atomic" "github.com/containers/image/image" "github.com/containers/image/manifest" @@ -16,10 +19,14 @@ import ( "github.com/containers/storage" "github.com/containers/storage/pkg/archive" "github.com/containers/storage/pkg/ioutils" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs. + var ( // ErrBlobDigestMismatch is returned when PutBlob() is given a blob // with a digest-based name that doesn't match its contents. 
@@ -27,8 +34,8 @@ var ( // ErrBlobSizeMismatch is returned when PutBlob() is given a blob // with an expected size that doesn't match the reader. ErrBlobSizeMismatch = errors.New("blob size mismatch") - // ErrNoManifestLists is returned when GetTargetManifest() is - // called. + // ErrNoManifestLists is returned when GetManifest() is called. + // with a non-nil instanceDigest. ErrNoManifestLists = errors.New("manifest lists are not supported by this transport") // ErrNoSuchImage is returned when we attempt to access an image which // doesn't exist in the storage area. @@ -37,256 +44,318 @@ var ( type storageImageSource struct { imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - LayerPosition map[ddigest.Digest]int `json:"-"` // Where we are in reading a blob's layers - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + ID string + layerPosition map[digest.Digest]int // Where we are in reading a blob's layers + cachedManifest []byte // A cached copy of the manifest, if already known, or nil + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } type storageImageDestination struct { - imageRef storageReference - Tag string `json:"tag,omitempty"` - Created time.Time `json:"created-time,omitempty"` - ID string `json:"id"` - BlobList []types.BlobInfo `json:"blob-list,omitempty"` // Ordered list of every blob the image has been told to handle - Layers map[ddigest.Digest][]string `json:"layers,omitempty"` // Map from digests of blobs to lists of layer IDs - BlobData map[ddigest.Digest][]byte `json:"-"` // Map from names of blobs that aren't layers to contents, temporary - Manifest []byte `json:"-"` // Manifest contents, temporary - Signatures []byte `json:"-"` // Signature contents, temporary - SignatureSizes []int `json:"signature-sizes"` // List of sizes of each signature slice + image types.ImageCloser + systemContext *types.SystemContext + imageRef storageReference // The reference we'll use to name the image + publicRef storageReference // The reference we return when asked about the name we'll give to the image + directory string // Temporary directory where we store blobs until Commit() time + nextTempFileID int32 // A counter that we use for computing filenames to assign to blobs + manifest []byte // Manifest contents, temporary + signatures []byte // Signature contents, temporary + blobDiffIDs map[digest.Digest]digest.Digest // Mapping from layer blobsums to their corresponding DiffIDs + fileSizes map[digest.Digest]int64 // Mapping from layer blobsums to their sizes + filenames map[digest.Digest]string // Mapping from layer blobsums to names of files we used to hold them + SignatureSizes []int `json:"signature-sizes,omitempty"` // List of sizes of each signature slice } -type storageLayerMetadata struct { - Digest string `json:"digest,omitempty"` - Size int64 `json:"size"` - CompressedSize int64 `json:"compressed-size,omitempty"` -} - -type storageImage struct { - types.Image +type storageImageCloser struct { + types.ImageCloser size int64 } -// newImageSource sets us up to read out an image, which needs to already exist. +// newImageSource sets up an image for reading. 
func newImageSource(imageRef storageReference) (*storageImageSource, error) { + // First, locate the image. img, err := imageRef.resolveImage() if err != nil { return nil, err } + + // Build the reader object. image := &storageImageSource{ imageRef: imageRef, - Created: time.Now(), ID: img.ID, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - LayerPosition: make(map[ddigest.Digest]int), + layerPosition: make(map[digest.Digest]int), SignatureSizes: []int{}, } - if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { - return nil, errors.Wrap(err, "error decoding metadata for source image") - } - return image, nil -} - -// newImageDestination sets us up to write a new image. -func newImageDestination(imageRef storageReference) (*storageImageDestination, error) { - image := &storageImageDestination{ - imageRef: imageRef, - Tag: imageRef.reference, - Created: time.Now(), - ID: imageRef.id, - BlobList: []types.BlobInfo{}, - Layers: make(map[ddigest.Digest][]string), - BlobData: make(map[ddigest.Digest][]byte), - SignatureSizes: []int{}, + if img.Metadata != "" { + if err := json.Unmarshal([]byte(img.Metadata), image); err != nil { + return nil, errors.Wrap(err, "error decoding metadata for source image") + } } return image, nil } +// Reference returns the image reference that we used to find this image. func (s storageImageSource) Reference() types.ImageReference { return s.imageRef } -func (s storageImageDestination) Reference() types.ImageReference { - return s.imageRef -} - +// Close cleans up any resources we tied up while reading the image. func (s storageImageSource) Close() error { return nil } -func (s storageImageDestination) Close() error { - return nil +// GetBlob reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { + rc, n, _, err = s.getBlobAndLayerID(info) + return rc, n, err } +// getBlobAndLayer reads the data blob or filesystem layer which matches the digest and size, if given. +func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { + var layer storage.Layer + var diffOptions *storage.DiffOptions + // We need a valid digest value. + err = info.Digest.Validate() + if err != nil { + return nil, -1, "", err + } + // Check if the blob corresponds to a diff that was used to initialize any layers. Our + // callers should try to retrieve layers using their uncompressed digests, so no need to + // check if they're using one of the compressed digests, which we can't reproduce anyway. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(info.Digest) + // If it's not a layer, then it must be a data item. + if len(layers) == 0 { + b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) + if err != nil { + return nil, -1, "", err + } + r := bytes.NewReader(b) + logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) + return ioutil.NopCloser(r), int64(r.Len()), "", nil + } + // Step through the list of matching layers. Tests may want to verify that if we have multiple layers + // which claim to have the same contents, that we actually do have multiple layers, otherwise we could + // just go ahead and use the first one every time. 
+ i := s.layerPosition[info.Digest] + s.layerPosition[info.Digest] = i + 1 + if len(layers) > 0 { + layer = layers[i%len(layers)] + } + // Force the storage layer to not try to match any compression that was used when the layer was first + // handed to it. + noCompression := archive.Uncompressed + diffOptions = &storage.DiffOptions{ + Compression: &noCompression, + } + if layer.UncompressedSize < 0 { + n = -1 + } else { + n = layer.UncompressedSize + } + logrus.Debugf("exporting filesystem layer %q without compression for blob %q", layer.ID, info.Digest) + rc, err = s.imageRef.transport.store.Diff("", layer.ID, diffOptions) + if err != nil { + return nil, -1, "", err + } + return rc, n, layer.ID, err +} + +// GetManifest() reads the image's manifest. +func (s *storageImageSource) GetManifest(instanceDigest *digest.Digest) (manifestBlob []byte, MIMEType string, err error) { + if instanceDigest != nil { + return nil, "", ErrNoManifestLists + } + if len(s.cachedManifest) == 0 { + // We stored the manifest as an item named after storage.ImageDigestBigDataKey. + cachedBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, storage.ImageDigestBigDataKey) + if err != nil { + return nil, "", err + } + s.cachedManifest = cachedBlob + } + return s.cachedManifest, manifest.GuessMIMEType(s.cachedManifest), err +} + +// LayerInfosForCopy() returns the list of layer blobs that make up the root filesystem of +// the image, after they've been decompressed. +func (s *storageImageSource) LayerInfosForCopy() []types.BlobInfo { + simg, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + logrus.Errorf("error reading image %q: %v", s.ID, err) + return nil + } + updatedBlobInfos := []types.BlobInfo{} + layerID := simg.TopLayer + _, manifestType, err := s.GetManifest(nil) + if err != nil { + logrus.Errorf("error reading image manifest for %q: %v", s.ID, err) + return nil + } + uncompressedLayerType := "" + switch manifestType { + case imgspecv1.MediaTypeImageManifest: + uncompressedLayerType = imgspecv1.MediaTypeImageLayer + case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema2MediaType: + // This is actually a compressed type, but there's no uncompressed type defined + uncompressedLayerType = manifest.DockerV2Schema2LayerMediaType + } + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + logrus.Errorf("error reading layer %q in image %q: %v", layerID, s.ID, err) + return nil + } + if layer.UncompressedDigest == "" { + logrus.Errorf("uncompressed digest for layer %q is unknown", layerID) + return nil + } + if layer.UncompressedSize < 0 { + logrus.Errorf("uncompressed size for layer %q is unknown", layerID) + return nil + } + blobInfo := types.BlobInfo{ + Digest: layer.UncompressedDigest, + Size: layer.UncompressedSize, + MediaType: uncompressedLayerType, + } + updatedBlobInfos = append([]types.BlobInfo{blobInfo}, updatedBlobInfos...) + layerID = layer.Parent + } + return updatedBlobInfos +} + +// GetSignatures() parses the image's signatures blob into a slice of byte slices. 
+func (s *storageImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) (signatures [][]byte, err error) { + if instanceDigest != nil { + return nil, ErrNoManifestLists + } + var offset int + sigslice := [][]byte{} + signature := []byte{} + if len(s.SignatureSizes) > 0 { + signatureBlob, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") + if err != nil { + return nil, errors.Wrapf(err, "error looking up signatures data for image %q", s.ID) + } + signature = signatureBlob + } + for _, length := range s.SignatureSizes { + sigslice = append(sigslice, signature[offset:offset+length]) + offset += length + } + if offset != len(signature) { + return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) + } + return sigslice, nil +} + +// newImageDestination sets us up to write a new image, caching blobs in a temporary directory until +// it's time to Commit() the image +func newImageDestination(ctx *types.SystemContext, imageRef storageReference) (*storageImageDestination, error) { + directory, err := ioutil.TempDir(temporaryDirectoryForBigFiles, "storage") + if err != nil { + return nil, errors.Wrapf(err, "error creating a temporary directory") + } + // Break reading of the reference we're writing, so that copy.Image() won't try to rewrite + // schema1 image manifests to remove embedded references, since that changes the manifest's + // digest, and that makes the image unusable if we subsequently try to access it using a + // reference that mentions the no-longer-correct digest. + publicRef := imageRef + publicRef.name = nil + image := &storageImageDestination{ + systemContext: ctx, + imageRef: imageRef, + publicRef: publicRef, + directory: directory, + blobDiffIDs: make(map[digest.Digest]digest.Digest), + fileSizes: make(map[digest.Digest]int64), + filenames: make(map[digest.Digest]string), + SignatureSizes: []int{}, + } + return image, nil +} + +// Reference returns a mostly-usable image reference that can't return a DockerReference, to +// avoid triggering logic in copy.Image() that rewrites schema 1 image manifests in order to +// remove image names that they contain which don't match the value we're using. +func (s storageImageDestination) Reference() types.ImageReference { + return s.publicRef +} + +// Close cleans up the temporary directory. +func (s *storageImageDestination) Close() error { + return os.RemoveAll(s.directory) +} + +// ShouldCompressLayers indicates whether or not a caller should compress not-already-compressed +// data when handing it to us. func (s storageImageDestination) ShouldCompressLayers() bool { - // We ultimately have to decompress layers to populate trees on disk, - // so callers shouldn't bother compressing them before handing them to - // us, if they're not already compressed. + // We ultimately have to decompress layers to populate trees on disk, so callers shouldn't + // bother compressing them before handing them to us, if they're not already compressed. return false } -// putBlob stores a layer or data blob, optionally enforcing that a digest in -// blobinfo matches the incoming data. -func (s *storageImageDestination) putBlob(stream io.Reader, blobinfo types.BlobInfo, enforceDigestAndSize bool) (types.BlobInfo, error) { - blobSize := blobinfo.Size - digest := blobinfo.Digest +// PutBlob stores a layer or data blob in our temporary directory, checking that any information +// in the blobinfo matches the incoming data. 
+func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { errorBlobInfo := types.BlobInfo{ Digest: "", Size: -1, } - // Try to read an initial snippet of the blob. - buf := [archive.HeaderSize]byte{} - n, err := io.ReadAtLeast(stream, buf[:], len(buf)) - if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { - return errorBlobInfo, err - } - // Set up to read the whole blob (the initial snippet, plus the rest) - // while digesting it with either the default, or the passed-in digest, - // if one was specified. - hasher := ddigest.Canonical.Digester() - if digest.Validate() == nil { - if a := digest.Algorithm(); a.Available() { + // Set up to digest the blob and count its size while saving it to a file. + hasher := digest.Canonical.Digester() + if blobinfo.Digest.Validate() == nil { + if a := blobinfo.Digest.Algorithm(); a.Available() { hasher = a.Digester() } } - hash := "" + diffID := digest.Canonical.Digester() + filename := filepath.Join(s.directory, fmt.Sprintf("%d", atomic.AddInt32(&s.nextTempFileID, 1))) + file, err := os.OpenFile(filename, os.O_CREATE|os.O_TRUNC|os.O_WRONLY|os.O_EXCL, 0600) + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error creating temporary file %q", filename) + } + defer file.Close() counter := ioutils.NewWriteCounter(hasher.Hash()) - defragmented := io.MultiReader(bytes.NewBuffer(buf[:n]), stream) - multi := io.TeeReader(defragmented, counter) - if (n > 0) && archive.IsArchive(buf[:n]) { - // It's a filesystem layer. If it's not the first one in the - // image, we assume that the most recently added layer is its - // parent. - parentLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - parentLayer = layerList[len(layerList)-1] - } - } - // If we have an expected content digest, generate a layer ID - // based on the parent's ID and the expected content digest. - id := "" - if digest.Validate() == nil { - id = ddigest.Canonical.FromBytes([]byte(parentLayer + "+" + digest.String())).Hex() - } - // Attempt to create the identified layer and import its contents. - layer, uncompressedSize, err := s.imageRef.transport.store.PutLayer(id, parentLayer, nil, "", true, multi) - if err != nil && errors.Cause(err) != storage.ErrDuplicateID { - logrus.Debugf("error importing layer blob %q as %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - if errors.Cause(err) == storage.ErrDuplicateID { - // We specified an ID, and there's already a layer with - // the same ID. Drain the input so that we can look at - // its length and digest. - _, err := io.Copy(ioutil.Discard, multi) - if err != nil && err != io.EOF { - logrus.Debugf("error digesting layer blob %q: %v", blobinfo.Digest, id, err) - return errorBlobInfo, err - } - hash = hasher.Digest().String() - } else { - // Applied the layer with the specified ID. Note the - // size info and computed digest. - hash = hasher.Digest().String() - layerMeta := storageLayerMetadata{ - Digest: hash, - CompressedSize: counter.Count, - Size: uncompressedSize, - } - if metadata, err := json.Marshal(&layerMeta); len(metadata) != 0 && err == nil { - s.imageRef.transport.store.SetMetadata(layer.ID, string(metadata)) - } - // Hang on to the new layer's ID. - id = layer.ID - } - // Check if the size looks right. 
- if enforceDigestAndSize && blobinfo.Size >= 0 && blobinfo.Size != counter.Count { - logrus.Debugf("layer blob %q size is %d, not %d, rejecting", blobinfo.Digest, counter.Count, blobinfo.Size) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobSizeMismatch - } - // If the content digest was specified, verify it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("layer blob %q digests to %q, rejecting", blobinfo.Digest, hash) - if layer != nil { - // Something's wrong; delete the newly-created layer. - s.imageRef.transport.store.DeleteLayer(layer.ID) - } - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = counter.Count - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Record that this layer blob is a layer, and the layer ID it - // ended up having. This is a list, in case the same blob is - // being applied more than once. - s.Layers[digest] = append(s.Layers[digest], id) - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: counter.Count}) - if layer != nil { - logrus.Debugf("blob %q imported as a filesystem layer %q", blobinfo.Digest, id) - } else { - logrus.Debugf("layer blob %q already present as layer %q", blobinfo.Digest, id) - } - } else { - // It's just data. Finish scanning it in, check that our - // computed digest matches the passed-in digest, and store it, - // but leave it out of the blob-to-layer-ID map so that we can - // tell that it's not a layer. - blob, err := ioutil.ReadAll(multi) - if err != nil && err != io.EOF { - return errorBlobInfo, err - } - hash = hasher.Digest().String() - if enforceDigestAndSize && blobinfo.Size >= 0 && int64(len(blob)) != blobinfo.Size { - logrus.Debugf("blob %q size is %d, not %d, rejecting", blobinfo.Digest, int64(len(blob)), blobinfo.Size) - return errorBlobInfo, ErrBlobSizeMismatch - } - // If we were given a digest, verify that the content matches - // it. - if enforceDigestAndSize && digest.Validate() == nil && digest.String() != hash { - logrus.Debugf("blob %q digests to %q, rejecting", blobinfo.Digest, hash) - return errorBlobInfo, ErrBlobDigestMismatch - } - // If we didn't get a blob size, return the one we calculated. - if blobSize == -1 { - blobSize = int64(len(blob)) - } - // If we didn't get a digest, construct one. - if digest == "" { - digest = ddigest.Digest(hash) - } - // Save the blob for when we Commit(). - s.BlobData[digest] = blob - s.BlobList = append(s.BlobList, types.BlobInfo{Digest: digest, Size: int64(len(blob))}) - logrus.Debugf("blob %q imported as opaque data %q", blobinfo.Digest, digest) + reader := io.TeeReader(io.TeeReader(stream, counter), file) + decompressed, err := archive.DecompressStream(reader) + if err != nil { + return errorBlobInfo, errors.Wrap(err, "error setting up to decompress blob") + } + // Copy the data to the file. + _, err = io.Copy(diffID.Hash(), decompressed) + decompressed.Close() + if err != nil { + return errorBlobInfo, errors.Wrapf(err, "error storing blob to file %q", filename) + } + // Ensure that any information that we were given about the blob is correct. 
+ if blobinfo.Digest.Validate() == nil && blobinfo.Digest != hasher.Digest() { + return errorBlobInfo, ErrBlobDigestMismatch + } + if blobinfo.Size >= 0 && blobinfo.Size != counter.Count { + return errorBlobInfo, ErrBlobSizeMismatch + } + // Record information about the blob. + s.blobDiffIDs[hasher.Digest()] = diffID.Digest() + s.fileSizes[hasher.Digest()] = counter.Count + s.filenames[hasher.Digest()] = filename + blobDigest := blobinfo.Digest + if blobDigest.Validate() != nil { + blobDigest = hasher.Digest() + } + blobSize := blobinfo.Size + if blobSize < 0 { + blobSize = counter.Count } return types.BlobInfo{ - Digest: digest, - Size: blobSize, + Digest: blobDigest, + Size: blobSize, + MediaType: blobinfo.MediaType, }, nil } -// PutBlob is used to both store filesystem layers and binary data that is part -// of the image. Filesystem layers are assumed to be imported in order, as -// that is required by some of the underlying storage drivers. -func (s *storageImageDestination) PutBlob(stream io.Reader, blobinfo types.BlobInfo) (types.BlobInfo, error) { - return s.putBlob(stream, blobinfo, true) -} - -// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob. +// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be +// reapplied using ReapplyBlob. +// // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned. // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); // it returns a non-nil error only on an unexpected failure. @@ -294,93 +363,289 @@ func (s *storageImageDestination) HasBlob(blobinfo types.BlobInfo) (bool, int64, if blobinfo.Digest == "" { return false, -1, errors.Errorf(`Can not check for a blob with unknown digest`) } - for _, blob := range s.BlobList { - if blob.Digest == blobinfo.Digest { - return true, blob.Size, nil - } + if err := blobinfo.Digest.Validate(); err != nil { + return false, -1, errors.Wrapf(err, `Can not check for a blob with invalid digest`) } + // Check if we've already cached it in a file. + if size, ok := s.fileSizes[blobinfo.Digest]; ok { + return true, size, nil + } + // Check if we have a wasn't-compressed layer in storage that's based on that blob. + layers, err := s.imageRef.transport.store.LayersByUncompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Save this for completeness. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].UncompressedSize, nil + } + // Check if we have a was-compressed layer in storage that's based on that blob. + layers, err = s.imageRef.transport.store.LayersByCompressedDigest(blobinfo.Digest) + if err != nil && errors.Cause(err) != storage.ErrLayerUnknown { + return false, -1, errors.Wrapf(err, `Error looking for compressed layers with digest %q`, blobinfo.Digest) + } + if len(layers) > 0 { + // Record the uncompressed value so that we can use it to calculate layer IDs. + s.blobDiffIDs[blobinfo.Digest] = layers[0].UncompressedDigest + return true, layers[0].CompressedSize, nil + } + // Nope, we don't have it. 
return false, -1, nil } +// ReapplyBlob is now a no-op, assuming HasBlob() says we already have it, since Commit() can just apply the +// same one when it walks the list in the manifest. func (s *storageImageDestination) ReapplyBlob(blobinfo types.BlobInfo) (types.BlobInfo, error) { - err := blobinfo.Digest.Validate() - if err != nil { - return types.BlobInfo{}, err + present, size, err := s.HasBlob(blobinfo) + if !present { + return types.BlobInfo{}, errors.Errorf("error reapplying blob %+v: blob was not previously applied", blobinfo) } - if layerList, ok := s.Layers[blobinfo.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, blobinfo.Digest.String()) - if err != nil { - return types.BlobInfo{}, err + if err != nil { + return types.BlobInfo{}, errors.Wrapf(err, "error reapplying blob %+v", blobinfo) + } + blobinfo.Size = size + return blobinfo, nil +} + +// computeID computes a recommended image ID based on information we have so far. If +// the manifest is not of a type that we recognize, we return an empty value, indicating +// that since we don't have a recommendation, a random ID should be used if one needs +// to be allocated. +func (s *storageImageDestination) computeID(m manifest.Manifest) string { + // Build the diffID list. We need the decompressed sums that we've been calculating to + // fill in the DiffIDs. It's expected (but not enforced by us) that the number of + // diffIDs corresponds to the number of non-EmptyLayer entries in the history. + var diffIDs []digest.Digest + switch m.(type) { + case *manifest.Schema1: + // Build a list of the diffIDs we've generated for the non-throwaway FS layers, + // in reverse of the order in which they were originally listed. + s1, ok := m.(*manifest.Schema1) + if !ok { + // Shouldn't happen + logrus.Debugf("internal error reading schema 1 manifest") + return "" } - return types.BlobInfo{Digest: blobinfo.Digest, Size: int64(len(b))}, nil + for i, history := range s1.History { + compat := manifest.Schema1V1Compatibility{} + if err := json.Unmarshal([]byte(history.V1Compatibility), &compat); err != nil { + logrus.Debugf("internal error reading schema 1 history: %v", err) + return "" + } + if compat.ThrowAway { + continue + } + blobSum := s1.FSLayers[i].BlobSum + diffID, ok := s.blobDiffIDs[blobSum] + if !ok { + logrus.Infof("error looking up diffID for layer %q", blobSum.String()) + return "" + } + diffIDs = append([]digest.Digest{diffID}, diffIDs...) + } + case *manifest.Schema2, *manifest.OCI1: + // We know the ID calculation for these formats doesn't actually use the diffIDs, + // so we don't need to populate the diffID list. } - layerList := s.Layers[blobinfo.Digest] - rc, _, err := diffLayer(s.imageRef.transport.store, layerList[len(layerList)-1]) + id, err := m.ImageID(diffIDs) if err != nil { - return types.BlobInfo{}, err + return "" } - return s.putBlob(rc, blobinfo, false) + return id +} + +// getConfigBlob exists only to let us retrieve the configuration blob so that the manifest package can dig +// information out of it for Inspect(). +func (s *storageImageDestination) getConfigBlob(info types.BlobInfo) ([]byte, error) { + if info.Digest == "" { + return nil, errors.Errorf(`no digest supplied when reading blob`) + } + if err := info.Digest.Validate(); err != nil { + return nil, errors.Wrapf(err, `invalid digest supplied when reading blob`) + } + // Assume it's a file, since we're only calling this from a place that expects to read files. 
+ if filename, ok := s.filenames[info.Digest]; ok { + contents, err2 := ioutil.ReadFile(filename) + if err2 != nil { + return nil, errors.Wrapf(err2, `error reading blob from file %q`, filename) + } + return contents, nil + } + // If it's not a file, it's a bug, because we're not expecting to be asked for a layer. + return nil, errors.New("blob not found") } func (s *storageImageDestination) Commit() error { - // Create the image record. - lastLayer := "" - for _, blob := range s.BlobList { - if layerList, ok := s.Layers[blob.Digest]; ok { - lastLayer = layerList[len(layerList)-1] - } + // Find the list of layer blobs. + if len(s.manifest) == 0 { + return errors.New("Internal error: storageImageDestination.Commit() called without PutManifest()") } - img, err := s.imageRef.transport.store.CreateImage(s.ID, nil, lastLayer, "", nil) + man, err := manifest.FromBlob(s.manifest, manifest.GuessMIMEType(s.manifest)) + if err != nil { + return errors.Wrapf(err, "error parsing manifest") + } + layerBlobs := man.LayerInfos() + // Extract or find the layers. + lastLayer := "" + addedLayers := []string{} + for _, blob := range layerBlobs { + var diff io.ReadCloser + // Check if there's already a layer with the ID that we'd give to the result of applying + // this layer blob to its parent, if it has one, or the blob's hex value otherwise. + diffID, haveDiffID := s.blobDiffIDs[blob.Digest] + if !haveDiffID { + // Check if it's elsewhere and the caller just forgot to pass it to us in a PutBlob(), + // or to even check if we had it. + logrus.Debugf("looking for diffID for blob %+v", blob.Digest) + has, _, err := s.HasBlob(blob) + if err != nil { + return errors.Wrapf(err, "error checking for a layer based on blob %q", blob.Digest.String()) + } + if !has { + return errors.Errorf("error determining uncompressed digest for blob %q", blob.Digest.String()) + } + diffID, haveDiffID = s.blobDiffIDs[blob.Digest] + if !haveDiffID { + return errors.Errorf("we have blob %q, but don't know its uncompressed digest", blob.Digest.String()) + } + } + id := diffID.Hex() + if lastLayer != "" { + id = digest.Canonical.FromBytes([]byte(lastLayer + "+" + diffID.Hex())).Hex() + } + if layer, err2 := s.imageRef.transport.store.Layer(id); layer != nil && err2 == nil { + // There's already a layer that should have the right contents, just reuse it. + lastLayer = layer.ID + continue + } + // Check if we cached a file with that blobsum. If we didn't already have a layer with + // the blob's contents, we should have gotten a copy. + if filename, ok := s.filenames[blob.Digest]; ok { + // Use the file's contents to initialize the layer. + file, err2 := os.Open(filename) + if err2 != nil { + return errors.Wrapf(err2, "error opening file %q", filename) + } + defer file.Close() + diff = file + } + if diff == nil { + // Try to find a layer with contents matching that blobsum. + layer := "" + layers, err2 := s.imageRef.transport.store.LayersByUncompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } else { + layers, err2 = s.imageRef.transport.store.LayersByCompressedDigest(blob.Digest) + if err2 == nil && len(layers) > 0 { + layer = layers[0].ID + } + } + if layer == "" { + return errors.Wrapf(err2, "error locating layer for blob %q", blob.Digest) + } + // Use the layer's contents to initialize the new layer. 
+ noCompression := archive.Uncompressed + diffOptions := &storage.DiffOptions{ + Compression: &noCompression, + } + diff, err2 = s.imageRef.transport.store.Diff("", layer, diffOptions) + if err2 != nil { + return errors.Wrapf(err2, "error reading layer %q for blob %q", layer, blob.Digest) + } + defer diff.Close() + } + if diff == nil { + // This shouldn't have happened. + return errors.Errorf("error applying blob %q: content not found", blob.Digest) + } + // Build the new layer using the diff, regardless of where it came from. + layer, _, err := s.imageRef.transport.store.PutLayer(id, lastLayer, nil, "", false, diff) + if err != nil { + return errors.Wrapf(err, "error adding layer with blob %q", blob.Digest) + } + lastLayer = layer.ID + addedLayers = append([]string{lastLayer}, addedLayers...) + } + // If one of those blobs was a configuration blob, then we can try to dig out the date when the image + // was originally created, in case we're just copying it. If not, no harm done. + options := &storage.ImageOptions{} + if inspect, err := man.Inspect(s.getConfigBlob); err == nil { + logrus.Debugf("setting image creation date to %s", inspect.Created) + options.CreationDate = inspect.Created + } + if manifestDigest, err := manifest.Digest(s.manifest); err == nil { + options.Digest = manifestDigest + } + // Create the image record, pointing to the most-recently added layer. + intendedID := s.imageRef.id + if intendedID == "" { + intendedID = s.computeID(man) + } + oldNames := []string{} + img, err := s.imageRef.transport.store.CreateImage(intendedID, nil, lastLayer, "", options) if err != nil { if errors.Cause(err) != storage.ErrDuplicateID { logrus.Debugf("error creating image: %q", err) - return errors.Wrapf(err, "error creating image %q", s.ID) + return errors.Wrapf(err, "error creating image %q", intendedID) } - img, err = s.imageRef.transport.store.Image(s.ID) + img, err = s.imageRef.transport.store.Image(intendedID) if err != nil { - return errors.Wrapf(err, "error reading image %q", s.ID) + return errors.Wrapf(err, "error reading image %q", intendedID) } if img.TopLayer != lastLayer { - logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", s.ID) - return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", s.ID) + logrus.Debugf("error creating image: image with ID %q exists, but uses different layers", intendedID) + return errors.Wrapf(storage.ErrDuplicateID, "image with ID %q already exists, but uses a different top layer", intendedID) } logrus.Debugf("reusing image ID %q", img.ID) + oldNames = append(oldNames, img.Names...) } else { logrus.Debugf("created new image ID %q", img.ID) } - s.ID = img.ID - names := img.Names - if s.Tag != "" { - names = append(names, s.Tag) + // Add the non-layer blobs as data items. Since we only share layers, they should all be in files, so + // we just need to screen out the ones that are actually layers to get the list of non-layers. + dataBlobs := make(map[digest.Digest]struct{}) + for blob := range s.filenames { + dataBlobs[blob] = struct{}{} } - // We have names to set, so move those names to this image. 
- if len(names) > 0 { + for _, layerBlob := range layerBlobs { + delete(dataBlobs, layerBlob.Digest) + } + for blob := range dataBlobs { + v, err := ioutil.ReadFile(s.filenames[blob]) + if err != nil { + return errors.Wrapf(err, "error copying non-layer blob %q to image", blob) + } + if err := s.imageRef.transport.store.SetImageBigData(img.ID, blob.String(), v); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving big data %q for image %q: %v", blob.String(), img.ID, err) + return errors.Wrapf(err, "error saving big data %q for image %q", blob.String(), img.ID) + } + } + // Set the reference's name on the image. + if name := s.imageRef.DockerReference(); len(oldNames) > 0 || name != nil { + names := []string{} + if name != nil { + names = append(names, verboseName(name)) + } + if len(oldNames) > 0 { + names = append(names, oldNames...) + } if err := s.imageRef.transport.store.SetNames(img.ID, names); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } - logrus.Debugf("error setting names on image %q: %v", img.ID, err) - return err + logrus.Debugf("error setting names %v on image %q: %v", names, img.ID, err) + return errors.Wrapf(err, "error setting names %v on image %q", names, img.ID) } logrus.Debugf("set names of image %q to %v", img.ID, names) } - // Save the data blobs to disk, and drop their contents from memory. - keys := []ddigest.Digest{} - for k, v := range s.BlobData { - if err := s.imageRef.transport.store.SetImageBigData(img.ID, k.String(), v); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) - } - logrus.Debugf("error saving big data %q for image %q: %v", k, img.ID, err) - return err - } - keys = append(keys, k) - } - for _, key := range keys { - delete(s.BlobData, key) - } - // Save the manifest, if we have one. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "manifest", s.Manifest); err != nil { + // Save the manifest. Use storage.ImageDigestBigDataKey as the item's + // name, so that its digest can be used to locate the image in the Store. + if err := s.imageRef.transport.store.SetImageBigData(img.ID, storage.ImageDigestBigDataKey, s.manifest); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -388,12 +653,14 @@ func (s *storageImageDestination) Commit() error { return err } // Save the signatures, if we have any. - if err := s.imageRef.transport.store.SetImageBigData(s.ID, "signatures", s.Signatures); err != nil { - if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { - logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + if len(s.signatures) > 0 { + if err := s.imageRef.transport.store.SetImageBigData(img.ID, "signatures", s.signatures); err != nil { + if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { + logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) + } + logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) + return err } - logrus.Debugf("error saving signatures for image %q: %v", img.ID, err) - return err } // Save our metadata. 
metadata, err := json.Marshal(s) @@ -405,7 +672,7 @@ func (s *storageImageDestination) Commit() error { return err } if len(metadata) != 0 { - if err = s.imageRef.transport.store.SetMetadata(s.ID, string(metadata)); err != nil { + if err = s.imageRef.transport.store.SetMetadata(img.ID, string(metadata)); err != nil { if _, err2 := s.imageRef.transport.store.DeleteImage(img.ID, true); err2 != nil { logrus.Debugf("error deleting incomplete image %q: %v", img.ID, err2) } @@ -418,7 +685,7 @@ func (s *storageImageDestination) Commit() error { } var manifestMIMETypes = []string{ - // TODO(runcom): we'll add OCI as part of another PR here + imgspecv1.MediaTypeImageManifest, manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType, @@ -428,23 +695,20 @@ func (s *storageImageDestination) SupportedManifestMIMETypes() []string { return manifestMIMETypes } -// PutManifest writes manifest to the destination. -// FIXME? This should also receive a MIME type if known, to differentiate between schema versions. -// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema), -// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError. +// PutManifest writes the manifest to the destination. func (s *storageImageDestination) PutManifest(manifest []byte) error { - s.Manifest = make([]byte, len(manifest)) - copy(s.Manifest, manifest) + s.manifest = make([]byte, len(manifest)) + copy(s.manifest, manifest) return nil } -// SupportsSignatures returns an error if we can't expect GetSignatures() to -// return data that was previously supplied to PutSignatures(). +// SupportsSignatures returns an error if we can't expect GetSignatures() to return data that was +// previously supplied to PutSignatures(). func (s *storageImageDestination) SupportsSignatures() error { return nil } -// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually +// AcceptsForeignLayerURLs returns false iff foreign layers in the manifest should actually be // uploaded to the image destination, true otherwise. func (s *storageImageDestination) AcceptsForeignLayerURLs() bool { return false @@ -455,6 +719,7 @@ func (s *storageImageDestination) MustMatchRuntimeOS() bool { return true } +// PutSignatures records the image's signatures for committing as a single data blob. 
func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { sizes := []int{} sigblob := []byte{} @@ -465,146 +730,73 @@ func (s *storageImageDestination) PutSignatures(signatures [][]byte) error { copy(newblob[len(sigblob):], sig) sigblob = newblob } - s.Signatures = sigblob + s.signatures = sigblob s.SignatureSizes = sizes return nil } -func (s *storageImageSource) GetBlob(info types.BlobInfo) (rc io.ReadCloser, n int64, err error) { - rc, n, _, err = s.getBlobAndLayerID(info) - return rc, n, err -} - -func (s *storageImageSource) getBlobAndLayerID(info types.BlobInfo) (rc io.ReadCloser, n int64, layerID string, err error) { - err = info.Digest.Validate() - if err != nil { - return nil, -1, "", err - } - if layerList, ok := s.Layers[info.Digest]; !ok || len(layerList) < 1 { - b, err := s.imageRef.transport.store.ImageBigData(s.ID, info.Digest.String()) - if err != nil { - return nil, -1, "", err - } - r := bytes.NewReader(b) - logrus.Debugf("exporting opaque data as blob %q", info.Digest.String()) - return ioutil.NopCloser(r), int64(r.Len()), "", nil - } - // If the blob was "put" more than once, we have multiple layer IDs - // which should all produce the same diff. For the sake of tests that - // want to make sure we created different layers each time the blob was - // "put", though, cycle through the layers. - layerList := s.Layers[info.Digest] - position, ok := s.LayerPosition[info.Digest] - if !ok { - position = 0 - } - s.LayerPosition[info.Digest] = (position + 1) % len(layerList) - logrus.Debugf("exporting filesystem layer %q for blob %q", layerList[position], info.Digest) - rc, n, err = diffLayer(s.imageRef.transport.store, layerList[position]) - return rc, n, layerList[position], err -} - -func diffLayer(store storage.Store, layerID string) (rc io.ReadCloser, n int64, err error) { - layer, err := store.Layer(layerID) - if err != nil { - return nil, -1, err - } - layerMeta := storageLayerMetadata{ - CompressedSize: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return nil, -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.CompressedSize <= 0 { - n = -1 - } else { - n = layerMeta.CompressedSize - } - diff, err := store.Diff("", layer.ID, nil) - if err != nil { - return nil, -1, err - } - return diff, n, nil -} - -func (s *storageImageSource) GetManifest() (manifestBlob []byte, MIMEType string, err error) { - manifestBlob, err = s.imageRef.transport.store.ImageBigData(s.ID, "manifest") - return manifestBlob, manifest.GuessMIMEType(manifestBlob), err -} - -func (s *storageImageSource) GetTargetManifest(digest ddigest.Digest) (manifestBlob []byte, MIMEType string, err error) { - return nil, "", ErrNoManifestLists -} - -func (s *storageImageSource) GetSignatures(ctx context.Context) (signatures [][]byte, err error) { - var offset int - signature, err := s.imageRef.transport.store.ImageBigData(s.ID, "signatures") - if err != nil { - return nil, err - } - sigslice := [][]byte{} - for _, length := range s.SignatureSizes { - sigslice = append(sigslice, signature[offset:offset+length]) - offset += length - } - if offset != len(signature) { - return nil, errors.Errorf("signatures data contained %d extra bytes", len(signatures)-offset) - } - return sigslice, nil -} - +// getSize() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. 
func (s *storageImageSource) getSize() (int64, error) { var sum int64 - names, err := s.imageRef.transport.store.ListImageBigData(s.imageRef.id) + // Size up the data blobs. + dataNames, err := s.imageRef.transport.store.ListImageBigData(s.ID) if err != nil { - return -1, errors.Wrapf(err, "error reading image %q", s.imageRef.id) + return -1, errors.Wrapf(err, "error reading image %q", s.ID) } - for _, name := range names { - bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.imageRef.id, name) + for _, dataName := range dataNames { + bigSize, err := s.imageRef.transport.store.ImageBigDataSize(s.ID, dataName) if err != nil { - return -1, errors.Wrapf(err, "error reading data blob size %q for %q", name, s.imageRef.id) + return -1, errors.Wrapf(err, "error reading data blob size %q for %q", dataName, s.ID) } sum += bigSize } + // Add the signature sizes. for _, sigSize := range s.SignatureSizes { sum += int64(sigSize) } - for _, layerList := range s.Layers { - for _, layerID := range layerList { - layer, err := s.imageRef.transport.store.Layer(layerID) - if err != nil { - return -1, err - } - layerMeta := storageLayerMetadata{ - Size: -1, - } - if layer.Metadata != "" { - if err := json.Unmarshal([]byte(layer.Metadata), &layerMeta); err != nil { - return -1, errors.Wrapf(err, "error decoding metadata for layer %q", layerID) - } - } - if layerMeta.Size < 0 { - return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) - } - sum += layerMeta.Size + // Prepare to walk the layer list. + img, err := s.imageRef.transport.store.Image(s.ID) + if err != nil { + return -1, errors.Wrapf(err, "error reading image info %q", s.ID) + } + // Walk the layer list. + layerID := img.TopLayer + for layerID != "" { + layer, err := s.imageRef.transport.store.Layer(layerID) + if err != nil { + return -1, err } + if layer.UncompressedDigest == "" || layer.UncompressedSize < 0 { + return -1, errors.Errorf("size for layer %q is unknown, failing getSize()", layerID) + } + sum += layer.UncompressedSize + if layer.Parent == "" { + break + } + layerID = layer.Parent } return sum, nil } -func (s *storageImage) Size() (int64, error) { +// Size() adds up the sizes of the image's data blobs (which includes the configuration blob), the +// signatures, and the uncompressed sizes of all of the image's layers. +func (s *storageImageSource) Size() (int64, error) { + return s.getSize() +} + +// Size() returns the previously-computed size of the image, with no error. 
+func (s *storageImageCloser) Size() (int64, error) { return s.size, nil } // newImage creates an image that also knows its size -func newImage(s storageReference) (types.Image, error) { +func newImage(ctx *types.SystemContext, s storageReference) (types.ImageCloser, error) { src, err := newImageSource(s) if err != nil { return nil, err } - img, err := image.FromSource(src) + img, err := image.FromSource(ctx, src) if err != nil { return nil, err } @@ -612,5 +804,5 @@ func newImage(s storageReference) (types.Image, error) { if err != nil { return nil, err } - return &storageImage{Image: img, size: size}, nil + return &storageImageCloser{ImageCloser: img, size: size}, nil } diff --git a/vendor/github.com/containers/image/storage/storage_reference.go b/vendor/github.com/containers/image/storage/storage_reference.go index ded58705..96887142 100644 --- a/vendor/github.com/containers/image/storage/storage_reference.go +++ b/vendor/github.com/containers/image/storage/storage_reference.go @@ -1,3 +1,5 @@ +// +build !containers_image_storage_stub + package storage import ( @@ -6,6 +8,7 @@ import ( "github.com/containers/image/docker/reference" "github.com/containers/image/types" "github.com/containers/storage" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -18,9 +21,11 @@ type storageReference struct { reference string id string name reference.Named + tag string + digest digest.Digest } -func newReference(transport storageTransport, reference, id string, name reference.Named) *storageReference { +func newReference(transport storageTransport, reference, id string, name reference.Named, tag string, digest digest.Digest) *storageReference { // We take a copy of the transport, which contains a pointer to the // store that it used for resolving this reference, so that the // transport that we'll return from Transport() won't be affected by @@ -30,6 +35,8 @@ func newReference(transport storageTransport, reference, id string, name referen reference: reference, id: id, name: name, + tag: tag, + digest: digest, } } @@ -37,11 +44,32 @@ func newReference(transport storageTransport, reference, id string, name referen // one present with the same name or ID, and return the image. func (s *storageReference) resolveImage() (*storage.Image, error) { if s.id == "" { + // Look for an image that has the expanded reference name as an explicit Name value. image, err := s.transport.store.Image(s.reference) if image != nil && err == nil { s.id = image.ID } } + if s.id == "" && s.name != nil && s.digest != "" { + // Look for an image with the specified digest that has the same name, + // though possibly with a different tag or digest, as a Name value, so + // that the canonical reference can be implicitly resolved to the image. 
+ images, err := s.transport.store.ImagesByDigest(s.digest) + if images != nil && err == nil { + repo := reference.FamiliarName(reference.TrimNamed(s.name)) + search: + for _, image := range images { + for _, name := range image.Names { + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if reference.FamiliarName(reference.TrimNamed(named)) == repo { + s.id = image.ID + break search + } + } + } + } + } + } if s.id == "" { logrus.Errorf("reference %q does not resolve to an image ID", s.StringWithinTransport()) return nil, ErrNoSuchImage @@ -50,12 +78,15 @@ func (s *storageReference) resolveImage() (*storage.Image, error) { if err != nil { return nil, errors.Wrapf(err, "error reading image %q", s.id) } - if s.reference != "" { + if s.name != nil { + repo := reference.FamiliarName(reference.TrimNamed(s.name)) nameMatch := false for _, name := range img.Names { - if name == s.reference { - nameMatch = true - break + if named, err := reference.ParseNormalizedNamed(name); err == nil { + if reference.FamiliarName(reference.TrimNamed(named)) == repo { + nameMatch = true + break + } } } if !nameMatch { @@ -76,8 +107,21 @@ func (s storageReference) Transport() types.ImageTransport { } } -// Return a name with a tag, if we have a name to base them on. +// Return a name with a tag or digest, if we have either, else return it bare. func (s storageReference) DockerReference() reference.Named { + if s.name == nil { + return nil + } + if s.tag != "" { + if namedTagged, err := reference.WithTag(s.name, s.tag); err == nil { + return namedTagged + } + } + if s.digest != "" { + if canonical, err := reference.WithDigest(s.name, s.digest); err == nil { + return canonical + } + } return s.name } @@ -91,7 +135,7 @@ func (s storageReference) StringWithinTransport() string { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + s.transport.store.GraphDriverName() + "@" + s.transport.store.GraphRoot() + "+" + s.transport.store.RunRoot() + optionsList + "]" - if s.name == nil { + if s.reference == "" { return storeSpec + "@" + s.id } if s.id == "" { @@ -120,11 +164,8 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { driverlessStoreSpec := "[" + s.transport.store.GraphRoot() + "]" namespaces := []string{} if s.name != nil { - if s.id != "" { - // The reference without the ID is also a valid namespace. - namespaces = append(namespaces, storeSpec+s.reference) - } - components := strings.Split(s.name.Name(), "/") + name := reference.TrimNamed(s.name) + components := strings.Split(name.String(), "/") for len(components) > 0 { namespaces = append(namespaces, storeSpec+strings.Join(components, "/")) components = components[:len(components)-1] @@ -135,8 +176,13 @@ func (s storageReference) PolicyConfigurationNamespaces() []string { return namespaces } -func (s storageReference) NewImage(ctx *types.SystemContext) (types.Image, error) { - return newImage(s) +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. +// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. 
+func (s storageReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + return newImage(ctx, s) } func (s storageReference) DeleteImage(ctx *types.SystemContext) error { @@ -159,5 +205,5 @@ func (s storageReference) NewImageSource(ctx *types.SystemContext) (types.ImageS } func (s storageReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { - return newImageDestination(s) + return newImageDestination(ctx, s) } diff --git a/vendor/github.com/containers/image/storage/storage_transport.go b/vendor/github.com/containers/image/storage/storage_transport.go index 1a0ebd04..f6ebcdc4 100644 --- a/vendor/github.com/containers/image/storage/storage_transport.go +++ b/vendor/github.com/containers/image/storage/storage_transport.go @@ -1,3 +1,5 @@ +// +build !containers_image_storage_stub + package storage import ( @@ -11,11 +13,14 @@ import ( "github.com/containers/image/types" "github.com/containers/storage" "github.com/containers/storage/pkg/idtools" - "github.com/opencontainers/go-digest" - ddigest "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" "github.com/sirupsen/logrus" ) +const ( + minimumTruncatedIDLength = 3 +) + func init() { transports.Register(Transport) } @@ -101,69 +106,133 @@ func (s *storageTransport) DefaultGIDMap() []idtools.IDMap { // relative to the given store, and returns it in a reference object. func (s storageTransport) ParseStoreReference(store storage.Store, ref string) (*storageReference, error) { var name reference.Named - var sum digest.Digest - var err error if ref == "" { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "%q is an empty reference") } if ref[0] == '[' { // Ignore the store specifier. closeIndex := strings.IndexRune(ref, ']') if closeIndex < 1 { - return nil, ErrInvalidReference + return nil, errors.Wrapf(ErrInvalidReference, "store specifier in %q did not end", ref) } ref = ref[closeIndex+1:] } - refInfo := strings.SplitN(ref, "@", 2) - if len(refInfo) == 1 { - // A name. - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err + + // The last segment, if there's more than one, is either a digest from a reference, or an image ID. + split := strings.LastIndex(ref, "@") + idOrDigest := "" + if split != -1 { + // Peel off that last bit so that we can work on the rest. + idOrDigest = ref[split+1:] + if idOrDigest == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) } - } else if len(refInfo) == 2 { - // An ID, possibly preceded by a name. - if refInfo[0] != "" { - name, err = reference.ParseNormalizedNamed(refInfo[0]) - if err != nil { - return nil, err - } - } - sum, err = digest.Parse(refInfo[1]) - if err != nil || sum.Validate() != nil { - sum, err = digest.Parse("sha256:" + refInfo[1]) - if err != nil || sum.Validate() != nil { - return nil, err - } - } - } else { // Coverage: len(refInfo) is always 1 or 2 - // Anything else: store specified in a form we don't - // recognize. - return nil, ErrInvalidReference + ref = ref[:split] } + + // The middle segment (now the last segment), if there is one, is a digest. 
+ split = strings.LastIndex(ref, "@") + sum := digest.Digest("") + if split != -1 { + sum = digest.Digest(ref[split+1:]) + if sum == "" { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + ref = ref[:split] + } + + // If we have something that unambiguously should be a digest, validate it, and then the third part, + // if we have one, as an ID. + id := "" + if sum != "" { + if idSum, err := digest.Parse("sha256:" + idOrDigest); err != nil || idSum.Validate() != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image ID", idOrDigest) + } + if err := sum.Validate(); err != nil { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like an image digest", sum) + } + id = idOrDigest + if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { + // The ID is a truncated version of the ID of an image that's present in local storage, + // so we might as well use the expanded value. + id = img.ID + } + } else if idOrDigest != "" { + // There was no middle portion, so the final portion could be either a digest or an ID. + if idSum, err := digest.Parse("sha256:" + idOrDigest); err == nil && idSum.Validate() == nil { + // It's an ID. + id = idOrDigest + } else if idSum, err := digest.Parse(idOrDigest); err == nil && idSum.Validate() == nil { + // It's a digest. + sum = idSum + } else if img, err := store.Image(idOrDigest); err == nil && img != nil && len(idOrDigest) >= minimumTruncatedIDLength && strings.HasPrefix(img.ID, idOrDigest) { + // It's a truncated version of the ID of an image that's present in local storage, + // and we may need the expanded value. + id = img.ID + } else { + return nil, errors.Wrapf(ErrInvalidReference, "%q does not look like a digest or image ID", idOrDigest) + } + } + + // If we only had one portion, then _maybe_ it's a truncated image ID. Only check on that if it's + // at least of what we guess is a reasonable minimum length, because we don't want a really short value + // like "a" matching an image by ID prefix when the input was actually meant to specify an image name. + if len(ref) >= minimumTruncatedIDLength && sum == "" && id == "" { + if img, err := store.Image(ref); err == nil && img != nil && strings.HasPrefix(img.ID, ref) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + id = img.ID + ref = "" + } + } + + // The initial portion is probably a name, possibly with a tag. + if ref != "" { + var err error + if name, err = reference.ParseNormalizedNamed(ref); err != nil { + return nil, errors.Wrapf(err, "error parsing named reference %q", ref) + } + } + if name == nil && sum == "" && id == "" { + return nil, errors.Errorf("error parsing reference") + } + + // Construct a copy of the store spec. optionsList := "" options := store.GraphOptions() if len(options) > 0 { optionsList = ":" + strings.Join(options, ",") } storeSpec := "[" + store.GraphDriverName() + "@" + store.GraphRoot() + "+" + store.RunRoot() + optionsList + "]" - id := "" - if sum.Validate() == nil { - id = sum.Hex() - } + + // Convert the name back into a reference string, if we got a name. 
refname := "" + tag := "" if name != nil { - name = reference.TagNameOnly(name) - refname = verboseName(name) + if sum.Validate() == nil { + canonical, err := reference.WithDigest(name, sum) + if err != nil { + return nil, errors.Wrapf(err, "error mixing name %q with digest %q", name, sum) + } + refname = verboseName(canonical) + } else { + name = reference.TagNameOnly(name) + tagged, ok := name.(reference.Tagged) + if !ok { + return nil, errors.Errorf("error parsing possibly-tagless name %q", ref) + } + refname = verboseName(name) + tag = tagged.Tag() + } } if refname == "" { - logrus.Debugf("parsed reference into %q", storeSpec+"@"+id) + logrus.Debugf("parsed reference to id into %q", storeSpec+"@"+id) } else if id == "" { - logrus.Debugf("parsed reference into %q", storeSpec+refname) + logrus.Debugf("parsed reference to refname into %q", storeSpec+refname) } else { - logrus.Debugf("parsed reference into %q", storeSpec+refname+"@"+id) + logrus.Debugf("parsed reference to refname@id into %q", storeSpec+refname+"@"+id) } - return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name), nil + return newReference(storageTransport{store: store, defaultUIDMap: s.defaultUIDMap, defaultGIDMap: s.defaultGIDMap}, refname, id, name, tag, sum), nil } func (s *storageTransport) GetStore() (storage.Store, error) { @@ -182,11 +251,14 @@ func (s *storageTransport) GetStore() (storage.Store, error) { return s.store, nil } -// ParseReference takes a name and/or an ID ("_name_"/"@_id_"/"_name_@_id_"), +// ParseReference takes a name and a tag or digest and/or ID +// ("_name_"/"@_id_"/"_name_:_tag_"/"_name_:_tag_@_id_"/"_name_@_digest_"/"_name_@_digest_@_id_"), // possibly prefixed with a store specifier in the form "[_graphroot_]" or // "[_driver_@_graphroot_]" or "[_driver_@_graphroot_+_runroot_]" or // "[_driver_@_graphroot_:_options_]" or "[_driver_@_graphroot_+_runroot_:_options_]", // tries to figure out which it is, and returns it in a reference object. +// If _id_ is the ID of an image that's present in local storage, it can be truncated, and +// even be specified as if it were a _name_, value. func (s *storageTransport) ParseReference(reference string) (types.ImageReference, error) { var store storage.Store // Check if there's a store location prefix. 
If there is, then it @@ -265,17 +337,23 @@ func (s *storageTransport) ParseReference(reference string) (types.ImageReferenc func (s storageTransport) GetStoreImage(store storage.Store, ref types.ImageReference) (*storage.Image, error) { dref := ref.DockerReference() - if dref == nil { - if sref, ok := ref.(*storageReference); ok { - if sref.id != "" { - if img, err := store.Image(sref.id); err == nil { - return img, nil - } + if dref != nil { + if img, err := store.Image(verboseName(dref)); err == nil { + return img, nil + } + } + if sref, ok := ref.(*storageReference); ok { + if sref.id != "" { + if img, err := store.Image(sref.id); err == nil { + return img, nil } } - return nil, ErrInvalidReference + tmpRef := *sref + if img, err := tmpRef.resolveImage(); err == nil { + return img, nil + } } - return store.Image(verboseName(dref)) + return nil, storage.ErrImageUnknown } func (s *storageTransport) GetImage(ref types.ImageReference) (*storage.Image, error) { @@ -335,7 +413,7 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { if err != nil { return err } - _, err = ddigest.Parse("sha256:" + scopeInfo[1]) + _, err = digest.Parse("sha256:" + scopeInfo[1]) if err != nil { return err } @@ -345,11 +423,28 @@ func (s storageTransport) ValidatePolicyConfigurationScope(scope string) error { return nil } -func verboseName(name reference.Named) string { - name = reference.TagNameOnly(name) - tag := "" - if tagged, ok := name.(reference.NamedTagged); ok { - tag = ":" + tagged.Tag() +func verboseName(r reference.Reference) string { + if r == nil { + return "" } - return name.Name() + tag + named, isNamed := r.(reference.Named) + digested, isDigested := r.(reference.Digested) + tagged, isTagged := r.(reference.Tagged) + name := "" + tag := "" + sum := "" + if isNamed { + name = (reference.TrimNamed(named)).String() + } + if isTagged { + if tagged.Tag() != "" { + tag = ":" + tagged.Tag() + } + } + if isDigested { + if digested.Digest().Validate() == nil { + sum = "@" + digested.Digest().String() + } + } + return name + tag + sum } diff --git a/vendor/github.com/containers/image/tarball/doc.go b/vendor/github.com/containers/image/tarball/doc.go new file mode 100644 index 00000000..a6ced5a0 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/doc.go @@ -0,0 +1,48 @@ +// Package tarball provides a way to generate images using one or more layer +// tarballs and an optional template configuration. 
+// +// An example: +// package main +// +// import ( +// "fmt" +// +// cp "github.com/containers/image/copy" +// "github.com/containers/image/tarball" +// "github.com/containers/image/transports/alltransports" +// +// imgspecv1 "github.com/containers/image/transports/alltransports" +// ) +// +// func imageFromTarball() { +// src, err := alltransports.ParseImageName("tarball:/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// // - or - +// // src, err := tarball.Transport.ParseReference("/var/cache/mock/fedora-26-x86_64/root_cache/cache.tar.gz") +// if err != nil { +// panic(err) +// } +// updater, ok := src.(tarball.ConfigUpdater) +// if !ok { +// panic("unexpected: a tarball reference should implement tarball.ConfigUpdater") +// } +// config := imgspecv1.Image{ +// Config: imgspecv1.ImageConfig{ +// Cmd: []string{"/bin/bash"}, +// }, +// } +// annotations := make(map[string]string) +// annotations[imgspecv1.AnnotationDescription] = "test image built from a mock root cache" +// err = updater.ConfigUpdate(config, annotations) +// if err != nil { +// panic(err) +// } +// dest, err := alltransports.ParseImageName("docker-daemon:mock:latest") +// if err != nil { +// panic(err) +// } +// err = cp.Image(nil, dest, src, nil) +// if err != nil { +// panic(err) +// } +// } +package tarball diff --git a/vendor/github.com/containers/image/tarball/tarball_reference.go b/vendor/github.com/containers/image/tarball/tarball_reference.go new file mode 100644 index 00000000..4ccfb406 --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_reference.go @@ -0,0 +1,93 @@ +package tarball + +import ( + "fmt" + "os" + "strings" + + "github.com/containers/image/docker/reference" + "github.com/containers/image/image" + "github.com/containers/image/types" + + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ConfigUpdater is an interface that ImageReferences for "tarball" images also +// implement. It can be used to set values for a configuration, and to set +// image annotations which will be present in the images returned by the +// reference's NewImage() or NewImageSource() methods. +type ConfigUpdater interface { + ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error +} + +type tarballReference struct { + transport types.ImageTransport + config imgspecv1.Image + annotations map[string]string + filenames []string + stdin []byte +} + +// ConfigUpdate updates the image's default configuration and adds annotations +// which will be visible in source images created using this reference. +func (r *tarballReference) ConfigUpdate(config imgspecv1.Image, annotations map[string]string) error { + r.config = config + if r.annotations == nil { + r.annotations = make(map[string]string) + } + for k, v := range annotations { + r.annotations[k] = v + } + return nil +} + +func (r *tarballReference) Transport() types.ImageTransport { + return r.transport +} + +func (r *tarballReference) StringWithinTransport() string { + return strings.Join(r.filenames, ":") +} + +func (r *tarballReference) DockerReference() reference.Named { + return nil +} + +func (r *tarballReference) PolicyConfigurationIdentity() string { + return "" +} + +func (r *tarballReference) PolicyConfigurationNamespaces() []string { + return nil +} + +// NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. +// The caller must call .Close() on the returned ImageCloser. 
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, +// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. +// WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. +func (r *tarballReference) NewImage(ctx *types.SystemContext) (types.ImageCloser, error) { + src, err := r.NewImageSource(ctx) + if err != nil { + return nil, err + } + img, err := image.FromSource(ctx, src) + if err != nil { + src.Close() + return nil, err + } + return img, nil +} + +func (r *tarballReference) DeleteImage(ctx *types.SystemContext) error { + for _, filename := range r.filenames { + if err := os.Remove(filename); err != nil && !os.IsNotExist(err) { + return fmt.Errorf("error removing %q: %v", filename, err) + } + } + return nil +} + +func (r *tarballReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) { + return nil, fmt.Errorf("destination not implemented yet") +} diff --git a/vendor/github.com/containers/image/tarball/tarball_src.go b/vendor/github.com/containers/image/tarball/tarball_src.go new file mode 100644 index 00000000..8b5b496d --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_src.go @@ -0,0 +1,260 @@ +package tarball + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + "strings" + "time" + + "github.com/containers/image/types" + + digest "github.com/opencontainers/go-digest" + imgspecs "github.com/opencontainers/image-spec/specs-go" + imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +type tarballImageSource struct { + reference tarballReference + filenames []string + diffIDs []digest.Digest + diffSizes []int64 + blobIDs []digest.Digest + blobSizes []int64 + blobTypes []string + config []byte + configID digest.Digest + configSize int64 + manifest []byte +} + +func (r *tarballReference) NewImageSource(ctx *types.SystemContext) (types.ImageSource, error) { + // Gather up the digests, sizes, and date information for all of the files. + filenames := []string{} + diffIDs := []digest.Digest{} + diffSizes := []int64{} + blobIDs := []digest.Digest{} + blobSizes := []int64{} + blobTimes := []time.Time{} + blobTypes := []string{} + for _, filename := range r.filenames { + var file *os.File + var err error + var blobSize int64 + var blobTime time.Time + var reader io.Reader + if filename == "-" { + blobSize = int64(len(r.stdin)) + blobTime = time.Now() + reader = bytes.NewReader(r.stdin) + } else { + file, err = os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q for reading: %v", filename, err) + } + defer file.Close() + reader = file + fileinfo, err := file.Stat() + if err != nil { + return nil, fmt.Errorf("error reading size of %q: %v", filename, err) + } + blobSize = fileinfo.Size() + blobTime = fileinfo.ModTime() + } + + // Default to assuming the layer is compressed. + layerType := imgspecv1.MediaTypeImageLayerGzip + + // Set up to digest the file as it is. + blobIDdigester := digest.Canonical.Digester() + reader = io.TeeReader(reader, blobIDdigester.Hash()) + + // Set up to digest the file after we maybe decompress it. 
+ diffIDdigester := digest.Canonical.Digester() + uncompressed, err := gzip.NewReader(reader) + if err == nil { + // It is compressed, so the diffID is the digest of the uncompressed version + reader = io.TeeReader(uncompressed, diffIDdigester.Hash()) + } else { + // It is not compressed, so the diffID and the blobID are going to be the same + diffIDdigester = blobIDdigester + layerType = imgspecv1.MediaTypeImageLayer + uncompressed = nil + } + n, err := io.Copy(ioutil.Discard, reader) + if err != nil { + return nil, fmt.Errorf("error reading %q: %v", filename, err) + } + if uncompressed != nil { + uncompressed.Close() + } + + // Grab our uncompressed and possibly-compressed digests and sizes. + filenames = append(filenames, filename) + diffIDs = append(diffIDs, diffIDdigester.Digest()) + diffSizes = append(diffSizes, n) + blobIDs = append(blobIDs, blobIDdigester.Digest()) + blobSizes = append(blobSizes, blobSize) + blobTimes = append(blobTimes, blobTime) + blobTypes = append(blobTypes, layerType) + } + + // Build the rootfs and history for the configuration blob. + rootfs := imgspecv1.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + created := time.Time{} + history := []imgspecv1.History{} + // Pick up the layer comment from the configuration's history list, if one is set. + comment := "imported from tarball" + if len(r.config.History) > 0 && r.config.History[0].Comment != "" { + comment = r.config.History[0].Comment + } + for i := range diffIDs { + createdBy := fmt.Sprintf("/bin/sh -c #(nop) ADD file:%s in %c", diffIDs[i].Hex(), os.PathSeparator) + history = append(history, imgspecv1.History{ + Created: &blobTimes[i], + CreatedBy: createdBy, + Comment: comment, + }) + // Use the mtime of the most recently modified file as the image's creation time. + if created.Before(blobTimes[i]) { + created = blobTimes[i] + } + } + + // Pick up other defaults from the config in the reference. + config := r.config + if config.Created == nil { + config.Created = &created + } + if config.Architecture == "" { + config.Architecture = runtime.GOARCH + } + if config.OS == "" { + config.OS = runtime.GOOS + } + config.RootFS = rootfs + config.History = history + + // Encode and digest the image configuration blob. + configBytes, err := json.Marshal(&config) + if err != nil { + return nil, fmt.Errorf("error generating configuration blob for %q: %v", strings.Join(r.filenames, separator), err) + } + configID := digest.Canonical.FromBytes(configBytes) + configSize := int64(len(configBytes)) + + // Populate a manifest with the configuration blob and the file as the single layer. + layerDescriptors := []imgspecv1.Descriptor{} + for i := range blobIDs { + layerDescriptors = append(layerDescriptors, imgspecv1.Descriptor{ + Digest: blobIDs[i], + Size: blobSizes[i], + MediaType: blobTypes[i], + }) + } + annotations := make(map[string]string) + for k, v := range r.annotations { + annotations[k] = v + } + manifest := imgspecv1.Manifest{ + Versioned: imgspecs.Versioned{ + SchemaVersion: 2, + }, + Config: imgspecv1.Descriptor{ + Digest: configID, + Size: configSize, + MediaType: imgspecv1.MediaTypeImageConfig, + }, + Layers: layerDescriptors, + Annotations: annotations, + } + + // Encode the manifest. + manifestBytes, err := json.Marshal(&manifest) + if err != nil { + return nil, fmt.Errorf("error generating manifest for %q: %v", strings.Join(r.filenames, separator), err) + } + + // Return the image. 
+ src := &tarballImageSource{ + reference: *r, + filenames: filenames, + diffIDs: diffIDs, + diffSizes: diffSizes, + blobIDs: blobIDs, + blobSizes: blobSizes, + blobTypes: blobTypes, + config: configBytes, + configID: configID, + configSize: configSize, + manifest: manifestBytes, + } + + return src, nil +} + +func (is *tarballImageSource) Close() error { + return nil +} + +func (is *tarballImageSource) GetBlob(blobinfo types.BlobInfo) (io.ReadCloser, int64, error) { + // We should only be asked about things in the manifest. Maybe the configuration blob. + if blobinfo.Digest == is.configID { + return ioutil.NopCloser(bytes.NewBuffer(is.config)), is.configSize, nil + } + // Maybe one of the layer blobs. + for i := range is.blobIDs { + if blobinfo.Digest == is.blobIDs[i] { + // We want to read that layer: open the file or memory block and hand it back. + if is.filenames[i] == "-" { + return ioutil.NopCloser(bytes.NewBuffer(is.reference.stdin)), int64(len(is.reference.stdin)), nil + } + reader, err := os.Open(is.filenames[i]) + if err != nil { + return nil, -1, fmt.Errorf("error opening %q: %v", is.filenames[i], err) + } + return reader, is.blobSizes[i], nil + } + } + return nil, -1, fmt.Errorf("no blob with digest %q found", blobinfo.Digest.String()) +} + +// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). +// It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); +// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). +func (is *tarballImageSource) GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) { + if instanceDigest != nil { + return nil, "", fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return is.manifest, imgspecv1.MediaTypeImageManifest, nil +} + +// GetSignatures returns the image's signatures. It may use a remote (= slow) service. +// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for +// (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list +// (e.g. if the source never returns manifest lists). +func (*tarballImageSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) { + if instanceDigest != nil { + return nil, fmt.Errorf("manifest lists are not supported by the %q transport", transportName) + } + return nil, nil +} + +func (is *tarballImageSource) Reference() types.ImageReference { + return &is.reference +} + +// LayerInfosForCopy() returns updated layer info that should be used when reading, in preference to values in the manifest, if specified. 
+func (*tarballImageSource) LayerInfosForCopy() []types.BlobInfo { + return nil +} diff --git a/vendor/github.com/containers/image/tarball/tarball_transport.go b/vendor/github.com/containers/image/tarball/tarball_transport.go new file mode 100644 index 00000000..72558b5e --- /dev/null +++ b/vendor/github.com/containers/image/tarball/tarball_transport.go @@ -0,0 +1,66 @@ +package tarball + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "strings" + + "github.com/containers/image/transports" + "github.com/containers/image/types" +) + +const ( + transportName = "tarball" + separator = ":" +) + +var ( + // Transport implements the types.ImageTransport interface for "tarball:" images, + // which are makeshift images constructed using one or more possibly-compressed tar + // archives. + Transport = &tarballTransport{} +) + +type tarballTransport struct { +} + +func (t *tarballTransport) Name() string { + return transportName +} + +func (t *tarballTransport) ParseReference(reference string) (types.ImageReference, error) { + var stdin []byte + var err error + filenames := strings.Split(reference, separator) + for _, filename := range filenames { + if filename == "-" { + stdin, err = ioutil.ReadAll(os.Stdin) + if err != nil { + return nil, fmt.Errorf("error buffering stdin: %v", err) + } + continue + } + f, err := os.Open(filename) + if err != nil { + return nil, fmt.Errorf("error opening %q: %v", filename, err) + } + f.Close() + } + ref := &tarballReference{ + transport: t, + filenames: filenames, + stdin: stdin, + } + return ref, nil +} + +func (t *tarballTransport) ValidatePolicyConfigurationScope(scope string) error { + // See the explanation in daemonReference.PolicyConfigurationIdentity. + return errors.New(`tarball: does not support any scopes except the default "" one`) +} + +func init() { + transports.Register(Transport) +} diff --git a/vendor/github.com/containers/image/transports/alltransports/alltransports.go b/vendor/github.com/containers/image/transports/alltransports/alltransports.go index 4279b9d2..b4552df6 100644 --- a/vendor/github.com/containers/image/transports/alltransports/alltransports.go +++ b/vendor/github.com/containers/image/transports/alltransports/alltransports.go @@ -13,8 +13,9 @@ import ( _ "github.com/containers/image/oci/archive" _ "github.com/containers/image/oci/layout" _ "github.com/containers/image/openshift" + _ "github.com/containers/image/tarball" // The ostree transport is registered by ostree*.go - _ "github.com/containers/image/storage" + // The storage transport is registered by storage*.go "github.com/containers/image/transports" "github.com/containers/image/types" "github.com/pkg/errors" diff --git a/vendor/github.com/containers/image/transports/alltransports/storage.go b/vendor/github.com/containers/image/transports/alltransports/storage.go new file mode 100644 index 00000000..a867c664 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage.go @@ -0,0 +1,8 @@ +// +build !containers_image_storage_stub + +package alltransports + +import ( + // Register the storage transport + _ "github.com/containers/image/storage" +) diff --git a/vendor/github.com/containers/image/transports/alltransports/storage_stub.go b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go new file mode 100644 index 00000000..4ac684e5 --- /dev/null +++ b/vendor/github.com/containers/image/transports/alltransports/storage_stub.go @@ -0,0 +1,9 @@ +// +build containers_image_storage_stub + +package alltransports + +import 
"github.com/containers/image/transports" + +func init() { + transports.Register(transports.NewStubTransport("containers-storage")) +} diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go index 4ede907b..2e9c7105 100644 --- a/vendor/github.com/containers/image/types/types.go +++ b/vendor/github.com/containers/image/types/types.go @@ -73,11 +73,12 @@ type ImageReference interface { // and each following element to be a prefix of the element preceding it. PolicyConfigurationNamespaces() []string - // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport. - // The caller must call .Close() on the returned Image. + // NewImage returns a types.ImageCloser for this reference, possibly specialized for this ImageTransport. + // The caller must call .Close() on the returned ImageCloser. // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource, // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage. - NewImage(ctx *SystemContext) (Image, error) + // WARNING: This may not do the right thing for a manifest list, see image.FromSource for details. + NewImage(ctx *SystemContext) (ImageCloser, error) // NewImageSource returns a types.ImageSource for this reference. // The caller must call .Close() on the returned ImageSource. NewImageSource(ctx *SystemContext) (ImageSource, error) @@ -96,9 +97,10 @@ type BlobInfo struct { Size int64 // -1 if unknown URLs []string Annotations map[string]string + MediaType string } -// ImageSource is a service, possibly remote (= slow), to download components of a single image. +// ImageSource is a service, possibly remote (= slow), to download components of a single image or a named image set (manifest list). // This is primarily useful for copying images around; for examining their properties, Image (below) // is usually more useful. // Each ImageSource should eventually be closed by calling Close(). @@ -113,15 +115,21 @@ type ImageSource interface { Close() error // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available). // It may use a remote (= slow) service. - GetManifest() ([]byte, string, error) - // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest - // out of a manifest list. - GetTargetManifest(digest digest.Digest) ([]byte, string, error) + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list); + // this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists). + GetManifest(instanceDigest *digest.Digest) ([]byte, string, error) // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown). - // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1. + // The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. GetBlob(BlobInfo) (io.ReadCloser, int64, error) // GetSignatures returns the image's signatures. It may use a remote (= slow) service. 
- GetSignatures(context.Context) ([][]byte, error) + // If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve signatures for + // (when the primary manifest is a manifest list); this never happens if the primary manifest is not a manifest list + // (e.g. if the source never returns manifest lists). + GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. + // The Digest field is guaranteed to be provided; Size may be -1. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy() []BlobInfo } // ImageDestination is a service, possibly remote (= slow), to store components of a single image. @@ -153,9 +161,10 @@ type ImageDestination interface { AcceptsForeignLayerURLs() bool // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise. MustMatchRuntimeOS() bool - // PutBlob writes contents of stream and returns data representing the result (with all data filled in). + // PutBlob writes contents of stream and returns data representing the result. // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it. // inputInfo.Size is the expected length of stream, if known. + // inputInfo.MediaType describes the blob format, if known. // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available // to any other readers for download using the supplied digest. // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far. @@ -194,28 +203,35 @@ func (e ManifestTypeRejectedError) Error() string { // Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them, // allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else. // This also makes the UnparsedImage→Image conversion an explicitly visible step. -// Each UnparsedImage should eventually be closed by calling Close(). +// +// An UnparsedImage is a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The UnparsedImage must not be used after the underlying ImageSource is Close()d. type UnparsedImage interface { // Reference returns the reference used to set up this source, _as specified by the user_ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image. Reference() ImageReference - // Close removes resources associated with an initialized UnparsedImage, if any. - Close() error // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need. Manifest() ([]byte, string, error) // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need. Signatures(ctx context.Context) ([][]byte, error) + // LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer blobsums that are listed in the image's manifest. 
+ // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. + // WARNING: The list may contain duplicates, and they are semantically relevant. + LayerInfosForCopy() []BlobInfo } // Image is the primary API for inspecting properties of images. -// Each Image should eventually be closed by calling Close(). +// An Image is based on a pair of (ImageSource, instance digest); it can represent either a manifest list or a single image instance. +// +// The Image must not be used after the underlying ImageSource is Close()d. type Image interface { // Note that Reference may return nil in the return value of UpdatedImage! UnparsedImage // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object. // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below. ConfigInfo() BlobInfo - // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise. + // ConfigBlob returns the blob described by ConfigInfo, if ConfigInfo().Digest != ""; nil otherwise. // The result is cached; it is OK to call this however often you need. ConfigBlob() ([]byte, error) // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about @@ -223,7 +239,7 @@ type Image interface { // old image manifests work (docker v2s1 especially). OCIConfig() (*v1.Image, error) // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers). - // The Digest field is guaranteed to be provided; Size may be -1. + // The Digest field is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided. // WARNING: The list may contain duplicates, and they are semantically relevant. LayerInfos() []BlobInfo // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref. @@ -240,16 +256,23 @@ type Image interface { // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired. // This does not change the state of the original Image object. UpdatedImage(options ManifestUpdateOptions) (Image, error) - // IsMultiImage returns true if the image's manifest is a list of images, false otherwise. - IsMultiImage() bool // Size returns an approximation of the amount of disk space which is consumed by the image in its current // location. If the size is not known, -1 will be returned. Size() (int64, error) } +// ImageCloser is an Image with a Close() method which must be called by the user. +// This is returned by ImageReference.NewImage, which transparently instantiates a types.ImageSource, +// to ensure that the ImageSource is closed. +type ImageCloser interface { + Image + // Close removes resources associated with an initialized ImageCloser. + Close() error +} + // ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest type ManifestUpdateOptions struct { - LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers) + LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls+annotations) which should replace the originals, in order (the root layer first, and then successive layered layers). BlobInfos' MediaType fields are ignored. 
EmbeddedDockerReference reference.Named ManifestMIMEType string // The values below are NOT requests to modify the image; they provide optional context which may or may not be used. @@ -283,7 +306,7 @@ type DockerAuthConfig struct { Password string } -// SystemContext allows parametrizing access to implicitly-accessed resources, +// SystemContext allows parameterizing access to implicitly-accessed resources, // like configuration files in /etc and users' login state in their home directory. // Various components can share the same field only if their semantics is exactly // the same; if in doubt, add a new field. @@ -306,6 +329,10 @@ type SystemContext struct { SystemRegistriesConfPath string // If not "", overrides the default path for the authentication file AuthFilePath string + // If not "", overrides the use of platform.GOARCH when choosing an image or verifying architecture match. + ArchitectureChoice string + // If not "", overrides the use of platform.GOOS when choosing an image or verifying OS match. + OSChoice string // === OCI.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), @@ -314,6 +341,8 @@ type SystemContext struct { OCICertPath string // Allow downloading OCI image layers over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. OCIInsecureSkipTLSVerify bool + // If not "", use a shared directory for storing blobs rather than within OCI layouts + OCISharedBlobDirPath string // === docker.Transport overrides === // If not "", a directory containing a CA certificate (ending with ".crt"), @@ -322,8 +351,9 @@ type SystemContext struct { DockerCertPath string // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above. // Ignored if DockerCertPath is non-empty. - DockerPerHostCertDirPath string - DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerPerHostCertDirPath string + // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections. + DockerInsecureSkipTLSVerify bool // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials DockerAuthConfig *DockerAuthConfig // if not "", an User-Agent header is added to each request when contacting a registry. @@ -334,6 +364,20 @@ type SystemContext struct { DockerDisableV1Ping bool // Directory to use for OSTree temporary files OSTreeTmpDirPath string + + // === docker/daemon.Transport overrides === + // A directory containing a CA certificate (ending with ".crt"), + // a client certificate (ending with ".cert") and a client certificate key + // (ending with ".key") used when talking to a Docker daemon. + DockerDaemonCertPath string + // The hostname or IP to the Docker daemon. If not set (aka ""), client.DefaultDockerHost is assumed. + DockerDaemonHost string + // Used to skip TLS verification, off by default. To take effect DockerDaemonCertPath needs to be specified as well. 
+ DockerDaemonInsecureSkipTLSVerify bool + + // === dir.Transport overrides === + // DirForceCompress compresses the image layers if set to true + DirForceCompress bool } // ProgressProperties is used to pass information from the copy code to a monitor which diff --git a/vendor/github.com/containers/image/vendor.conf b/vendor/github.com/containers/image/vendor.conf index d5bae3b0..f3634b38 100644 --- a/vendor/github.com/containers/image/vendor.conf +++ b/vendor/github.com/containers/image/vendor.conf @@ -1,5 +1,5 @@ github.com/sirupsen/logrus v1.0.0 -github.com/containers/storage 47536c89fcc545a87745e1a1573addc439409165 +github.com/containers/storage master github.com/davecgh/go-spew 346938d642f2ec3594ed81d874461961cd0faa76 github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1 github.com/docker/distribution 5f6282db7d65e6d72ad7c2cc66310724a57be716 @@ -22,7 +22,7 @@ github.com/pborman/uuid 1b00554d822231195d1babd97ff4a781231955c9 github.com/pkg/errors 248dadf4e9068a0b3e79f02ed0a610d935de5302 github.com/pmezard/go-difflib 792786c7400a136282c1664665ae0a8db921c6c2 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 -github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 +github.com/vbatts/tar-split v0.10.2 golang.org/x/crypto 453249f01cfeb54c3d549ddb75ff152ca243f9d8 golang.org/x/net 6b27048ae5e6ad1ef927e72e437531493de612fe golang.org/x/sys 43e60d72a8e2bd92ee98319ba9a384a0e9837c08 @@ -36,4 +36,5 @@ github.com/tchap/go-patricia v2.2.6 github.com/opencontainers/selinux ba1aefe8057f1d0cfb8e88d0ec1dc85925ef987d github.com/BurntSushi/toml b26d9c308763d68093482582cea63d69be07a0f0 github.com/ostreedev/ostree-go aeb02c6b6aa2889db3ef62f7855650755befd460 -github.com/gogo/protobuf/proto fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/gogo/protobuf fcdc5011193ff531a548e9b0301828d5a5b97fd8 +github.com/pquerna/ffjson master diff --git a/vendor/github.com/containers/storage/drivers/overlay/overlay.go b/vendor/github.com/containers/storage/drivers/overlay/overlay.go index 249f98bc..4974a94e 100644 --- a/vendor/github.com/containers/storage/drivers/overlay/overlay.go +++ b/vendor/github.com/containers/storage/drivers/overlay/overlay.go @@ -3,7 +3,6 @@ package overlay import ( - "bufio" "fmt" "io" "io/ioutil" @@ -26,7 +25,6 @@ import ( "github.com/containers/storage/pkg/locker" "github.com/containers/storage/pkg/mount" "github.com/containers/storage/pkg/parsers" - "github.com/containers/storage/pkg/parsers/kernel" "github.com/containers/storage/pkg/system" units "github.com/docker/go-units" "github.com/opencontainers/selinux/go-selinux/label" @@ -124,22 +122,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - if err := supportsOverlay(); err != nil { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") - } - - // require kernel 4.0.0 to ensure multiple lower dirs are supported - v, err := kernel.GetKernelVersion() - if err != nil { - return nil, err - } - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") - } - logrus.Warn("Using pre-4.0.0 kernel for overlay, mount failures may require kernel update") - } - fsMagic, err := graphdriver.GetFSMagic(home) if err != nil { return nil, err @@ -153,22 +135,18 @@ func Init(home string, options []string, uidMaps, gidMaps 
[]idtools.IDMap) (grap case graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs: logrus.Errorf("'overlay' is not supported over %s", backingFs) return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s", backingFs) - case graphdriver.FsMagicBtrfs: - // Support for OverlayFS on BTRFS was added in kernel 4.7 - // See https://btrfs.wiki.kernel.org/index.php/Changelog - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 7, Minor: 0}) < 0 { - if !opts.overrideKernelCheck { - logrus.Errorf("'overlay' requires kernel 4.7 to use on %s", backingFs) - return nil, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' requires kernel 4.7 to use on %s", backingFs) - } - logrus.Warn("Using pre-4.7.0 kernel for overlay on btrfs, may require kernel update") - } } rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) if err != nil { return nil, err } + + supportsDType, err := supportsOverlay(home, fsMagic, rootUID, rootGID) + if err != nil { + return nil, errors.Wrap(graphdriver.ErrNotSupported, "kernel does not support overlay fs") + } + // Create the driver home dir if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err @@ -178,16 +156,6 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } - supportsDType, err := fsutils.SupportsDType(home) - if err != nil { - return nil, err - } - if !supportsDType { - logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) - // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4 - // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) - } - d := &Driver{ name: "overlay", home: home, @@ -210,10 +178,10 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap } } else if opts.quota.Size > 0 { // if xfs is not the backing fs then error out if the storage-opt overlay.size is used. - return nil, fmt.Errorf("Storage Option overlay.size only supported for backingFS XFS. Found %v", backingFs) + return nil, fmt.Errorf("Storage option overlay.size only supported for backingFS XFS. 
Found %v", backingFs) } - logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) + logrus.Debugf("backingFs=%s, projectQuotaSupported=%v", backingFs, projectQuotaSupported) return d, nil } @@ -227,20 +195,20 @@ func parseOptions(options []string) (*overlayOptions, error) { } key = strings.ToLower(key) switch key { - case "overlay.override_kernel_check", "overlay2.override_kernel_check": + case ".override_kernel_check", "overlay.override_kernel_check", "overlay2.override_kernel_check": logrus.Debugf("overlay: override_kernelcheck=%s", val) o.overrideKernelCheck, err = strconv.ParseBool(val) if err != nil { return nil, err } - case "overlay.size", "overlay2.size": + case ".size", "overlay.size", "overlay2.size": logrus.Debugf("overlay: size=%s", val) size, err := units.RAMInBytes(val) if err != nil { return nil, err } o.quota.Size = uint64(size) - case "overlay.imagestore", "overlay2.imagestore": + case ".imagestore", "overlay.imagestore", "overlay2.imagestore": logrus.Debugf("overlay: imagestore=%s", val) // Additional read only image stores to use for lower paths for _, store := range strings.Split(val, ",") { @@ -264,25 +232,59 @@ func parseOptions(options []string) (*overlayOptions, error) { return o, nil } -func supportsOverlay() error { - // We can try to modprobe overlay first before looking at - // proc/filesystems for when overlay is supported +func supportsOverlay(home string, homeMagic graphdriver.FsMagic, rootUID, rootGID int) (supportsDType bool, err error) { + // We can try to modprobe overlay first exec.Command("modprobe", "overlay").Run() - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if s.Text() == "nodev\toverlay" { - return nil + layerDir, err := ioutil.TempDir(home, "compat") + if err == nil { + // Check if reading the directory's contents populates the d_type field, which is required + // for proper operation of the overlay filesystem. + supportsDType, err = fsutils.SupportsDType(layerDir) + if err != nil { + return false, err } + if !supportsDType { + logrus.Warn(overlayutils.ErrDTypeNotSupported("overlay", backingFs)) + // TODO: Will make fatal when CRI-O Has AMI built on RHEL7.4 + // return nil, overlayutils.ErrDTypeNotSupported("overlay", backingFs) + } + + // Try a test mount in the specific location we're looking at using. 
+ mergedDir := filepath.Join(layerDir, "merged") + lower1Dir := filepath.Join(layerDir, "lower1") + lower2Dir := filepath.Join(layerDir, "lower2") + defer func() { + // Permitted to fail, since the various subdirectories + // can be empty or not even there, and the home might + // legitimately be not empty + _ = unix.Unmount(mergedDir, unix.MNT_DETACH) + _ = os.RemoveAll(layerDir) + _ = os.Remove(home) + }() + _ = idtools.MkdirAs(mergedDir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower1Dir, 0700, rootUID, rootGID) + _ = idtools.MkdirAs(lower2Dir, 0700, rootUID, rootGID) + flags := fmt.Sprintf("lowerdir=%s:%s", lower1Dir, lower2Dir) + if len(flags) < unix.Getpagesize() { + if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + logrus.Debugf("overlay test mount with multiple lowers succeeded") + return supportsDType, nil + } + } + flags = fmt.Sprintf("lowerdir=%s", lower1Dir) + if len(flags) < unix.Getpagesize() { + if mountFrom(filepath.Dir(home), "overlay", mergedDir, "overlay", 0, flags) == nil { + logrus.Errorf("overlay test mount with multiple lowers failed, but succeeded with a single lower") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "kernel too old to provide multiple lowers feature for overlay") + } + } + logrus.Errorf("'overlay' is not supported over %s at %q", backingFs, home) + return supportsDType, errors.Wrapf(graphdriver.ErrIncompatibleFS, "'overlay' is not supported over %s at %q", backingFs, home) } + logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") - return errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") + return supportsDType, errors.Wrap(graphdriver.ErrNotSupported, "'overlay' not found as a supported filesystem on this host. Please ensure kernel is new enough and has overlay support loaded.") } func useNaiveDiff(home string) bool { @@ -650,10 +652,21 @@ func (d *Driver) Get(id, mountLabel string) (_ string, retErr error) { func (d *Driver) Put(id string) error { d.locker.Lock(id) defer d.locker.Unlock(id) + dir := d.dir(id) + if _, err := os.Stat(dir); err != nil { + return err + } mountpoint := path.Join(d.dir(id), "merged") if count := d.ctr.Decrement(mountpoint); count > 0 { return nil } + if _, err := ioutil.ReadFile(path.Join(dir, lowerFile)); err != nil { + // If no lower, we used the diff directory, so no work to do + if os.IsNotExist(err) { + return nil + } + return err + } if err := unix.Unmount(mountpoint, unix.MNT_DETACH); err != nil { logrus.Debugf("Failed to unmount %s overlay: %s - %v", id, mountpoint, err) } diff --git a/vendor/github.com/containers/storage/drivers/vfs/driver.go b/vendor/github.com/containers/storage/drivers/vfs/driver.go index cf8eca91..ae62207d 100644 --- a/vendor/github.com/containers/storage/drivers/vfs/driver.go +++ b/vendor/github.com/containers/storage/drivers/vfs/driver.go @@ -36,6 +36,11 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap for _, option := range options { if strings.HasPrefix(option, "vfs.imagestore=") { d.homes = append(d.homes, strings.Split(option[15:], ",")...) + continue + } + if strings.HasPrefix(option, ".imagestore=") { + d.homes = append(d.homes, strings.Split(option[12:], ",")...) 
+ continue } } return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil diff --git a/vendor/github.com/containers/storage/images.go b/vendor/github.com/containers/storage/images.go index ed22e131..962e1bb7 100644 --- a/vendor/github.com/containers/storage/images.go +++ b/vendor/github.com/containers/storage/images.go @@ -14,12 +14,22 @@ import ( "github.com/pkg/errors" ) +const ( + // ImageDigestBigDataKey is the name of the big data item whose + // contents we consider useful for computing a "digest" of the + // image, by which we can locate the image later. + ImageDigestBigDataKey = "manifest" +) + // An Image is a reference to a layer and an associated metadata string. type Image struct { // ID is either one which was specified at create-time, or a random // value which was generated by the library. ID string `json:"id"` + // Digest is a digest value that we can use to locate the image. + Digest digest.Digest `json:"digest,omitempty"` + // Names is an optional set of user-defined convenience values. The // image can be referred to by its ID or any of its names. Names are // unique among images. @@ -28,7 +38,7 @@ type Image struct { // TopLayer is the ID of the topmost layer of the image itself, if the // image contains one or more layers. Multiple images can refer to the // same top layer. - TopLayer string `json:"layer"` + TopLayer string `json:"layer,omitempty"` // Metadata is data we keep for the convenience of the caller. It is not // expected to be large, since it is kept in memory. @@ -74,6 +84,10 @@ type ROImageStore interface { // Images returns a slice enumerating the known images. Images() ([]Image, error) + + // Images returns a slice enumerating the images which have a big data + // item with the name ImageDigestBigDataKey and the specified digest. + ByDigest(d digest.Digest) ([]*Image, error) } // ImageStore provides bookkeeping for information about Images. @@ -87,7 +101,7 @@ type ImageStore interface { // Create creates an image that has a specified ID (or a random one) and // optional names, using the specified layer as its topmost (hopefully // read-only) layer. That layer can be referenced by multiple images. - Create(id string, names []string, layer, metadata string, created time.Time) (*Image, error) + Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (*Image, error) // SetNames replaces the list of names associated with an image with the // supplied values. 
@@ -107,6 +121,7 @@ type imageStore struct { idindex *truncindex.TruncIndex byid map[string]*Image byname map[string]*Image + bydigest map[digest.Digest][]*Image } func (r *imageStore) Images() ([]Image, error) { @@ -140,6 +155,7 @@ func (r *imageStore) Load() error { idlist := []string{} ids := make(map[string]*Image) names := make(map[string]*Image) + digests := make(map[digest.Digest][]*Image) if err = json.Unmarshal(data, &images); len(data) == 0 || err == nil { idlist = make([]string, 0, len(images)) for n, image := range images { @@ -152,6 +168,16 @@ func (r *imageStore) Load() error { } names[name] = images[n] } + // Implicit digest + if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { + digests[digest] = append(digests[digest], images[n]) + } + // Explicit digest + if image.Digest == "" { + image.Digest = image.BigDataDigests[ImageDigestBigDataKey] + } else if image.Digest != image.BigDataDigests[ImageDigestBigDataKey] { + digests[image.Digest] = append(digests[image.Digest], images[n]) + } } } if shouldSave && !r.IsReadWrite() { @@ -161,6 +187,7 @@ func (r *imageStore) Load() error { r.idindex = truncindex.NewTruncIndex(idlist) r.byid = ids r.byname = names + r.bydigest = digests if shouldSave { return r.Save() } @@ -199,6 +226,7 @@ func newImageStore(dir string) (ImageStore, error) { images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), } if err := istore.Load(); err != nil { return nil, err @@ -219,6 +247,7 @@ func newROImageStore(dir string) (ROImageStore, error) { images: []*Image{}, byid: make(map[string]*Image), byname: make(map[string]*Image), + bydigest: make(map[digest.Digest][]*Image), } if err := istore.Load(); err != nil { return nil, err @@ -265,7 +294,7 @@ func (r *imageStore) SetFlag(id string, flag string, value interface{}) error { return r.Save() } -func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time) (image *Image, err error) { +func (r *imageStore) Create(id string, names []string, layer, metadata string, created time.Time, searchableDigest digest.Digest) (image *Image, err error) { if !r.IsReadWrite() { return nil, errors.Wrapf(ErrStoreIsReadOnly, "not allowed to create new images at %q", r.imagespath()) } @@ -292,6 +321,7 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c if err == nil { image = &Image{ ID: id, + Digest: searchableDigest, Names: names, TopLayer: layer, Metadata: metadata, @@ -304,6 +334,10 @@ func (r *imageStore) Create(id string, names []string, layer, metadata string, c r.images = append(r.images, image) r.idindex.Add(id) r.byid[id] = image + if searchableDigest != "" { + list := r.bydigest[searchableDigest] + r.bydigest[searchableDigest] = append(list, image) + } for _, name := range names { r.byname[name] = image } @@ -383,6 +417,28 @@ func (r *imageStore) Delete(id string) error { r.images = append(r.images[:toDeleteIndex], r.images[toDeleteIndex+1:]...) 
} } + if digest, ok := image.BigDataDigests[ImageDigestBigDataKey]; ok { + // remove the image from the digest-based index + if list, ok := r.bydigest[digest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, digest) + } else { + r.bydigest[digest] = prunedList + } + } + } + if image.Digest != "" { + // remove the image's hard-coded digest from the digest-based index + if list, ok := r.bydigest[image.Digest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, image.Digest) + } else { + r.bydigest[image.Digest] = prunedList + } + } + } if err := r.Save(); err != nil { return err } @@ -411,6 +467,13 @@ func (r *imageStore) Exists(id string) bool { return ok } +func (r *imageStore) ByDigest(d digest.Digest) ([]*Image, error) { + if images, ok := r.bydigest[d]; ok { + return images, nil + } + return nil, ErrImageUnknown +} + func (r *imageStore) BigData(id, key string) ([]byte, error) { if key == "" { return nil, errors.Wrapf(ErrInvalidBigDataName, "can't retrieve image big data value for empty name") @@ -486,6 +549,17 @@ func (r *imageStore) BigDataNames(id string) ([]string, error) { return image.BigDataNames, nil } +func imageSliceWithoutValue(slice []*Image, value *Image) []*Image { + modified := make([]*Image, 0, len(slice)) + for _, v := range slice { + if v == value { + continue + } + modified = append(modified, v) + } + return modified +} + func (r *imageStore) SetBigData(id, key string, data []byte) error { if key == "" { return errors.Wrapf(ErrInvalidBigDataName, "can't set empty name for image big data item") @@ -528,6 +602,29 @@ func (r *imageStore) SetBigData(id, key string, data []byte) error { image.BigDataNames = append(image.BigDataNames, key) save = true } + if key == ImageDigestBigDataKey { + if oldDigest != "" && oldDigest != newDigest && oldDigest != image.Digest { + // remove the image from the list of images in the digest-based + // index which corresponds to the old digest for this item, unless + // it's also the hard-coded digest + if list, ok := r.bydigest[oldDigest]; ok { + prunedList := imageSliceWithoutValue(list, image) + if len(prunedList) == 0 { + delete(r.bydigest, oldDigest) + } else { + r.bydigest[oldDigest] = prunedList + } + } + } + // add the image to the list of images in the digest-based index which + // corresponds to the new digest for this item, unless it's already there + list := r.bydigest[newDigest] + if len(list) == len(imageSliceWithoutValue(list, image)) { + // the list isn't shortened by trying to prune this image from it, + // so it's not in there yet + r.bydigest[newDigest] = append(list, image) + } + } if save { err = r.Save() } diff --git a/vendor/github.com/containers/storage/images_ffjson.go b/vendor/github.com/containers/storage/images_ffjson.go index fd3ef11a..f6a8b065 100644 --- a/vendor/github.com/containers/storage/images_ffjson.go +++ b/vendor/github.com/containers/storage/images_ffjson.go @@ -38,6 +38,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { buf.WriteString(`{ "id":`) fflib.WriteJsonString(buf, string(j.ID)) buf.WriteByte(',') + if len(j.Digest) != 0 { + buf.WriteString(`"digest":`) + fflib.WriteJsonString(buf, string(j.Digest)) + buf.WriteByte(',') + } if len(j.Names) != 0 { buf.WriteString(`"names":`) if j.Names != nil { @@ -54,9 +59,11 @@ func (j *Image) MarshalJSONBuf(buf fflib.EncodingBuffer) error { } buf.WriteByte(',') } - buf.WriteString(`"layer":`) - fflib.WriteJsonString(buf, 
string(j.TopLayer)) - buf.WriteByte(',') + if len(j.TopLayer) != 0 { + buf.WriteString(`"layer":`) + fflib.WriteJsonString(buf, string(j.TopLayer)) + buf.WriteByte(',') + } if len(j.Metadata) != 0 { buf.WriteString(`"metadata":`) fflib.WriteJsonString(buf, string(j.Metadata)) @@ -144,6 +151,8 @@ const ( ffjtImageID + ffjtImageDigest + ffjtImageNames ffjtImageTopLayer @@ -163,6 +172,8 @@ const ( var ffjKeyImageID = []byte("id") +var ffjKeyImageDigest = []byte("digest") + var ffjKeyImageNames = []byte("names") var ffjKeyImageTopLayer = []byte("layer") @@ -266,6 +277,14 @@ mainparse: goto mainparse } + case 'd': + + if bytes.Equal(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + case 'f': if bytes.Equal(ffjKeyImageFlags, kn) { @@ -356,6 +375,12 @@ mainparse: goto mainparse } + if fflib.EqualFoldRight(ffjKeyImageDigest, kn) { + currentKey = ffjtImageDigest + state = fflib.FFParse_want_colon + goto mainparse + } + if fflib.SimpleLetterEqualFold(ffjKeyImageID, kn) { currentKey = ffjtImageID state = fflib.FFParse_want_colon @@ -382,6 +407,9 @@ mainparse: case ffjtImageID: goto handle_ID + case ffjtImageDigest: + goto handle_Digest + case ffjtImageNames: goto handle_Names @@ -446,6 +474,32 @@ handle_ID: state = fflib.FFParse_after_value goto mainparse +handle_Digest: + + /* handler: j.Digest type=digest.Digest kind=string quoted=false*/ + + { + + { + if tok != fflib.FFTok_string && tok != fflib.FFTok_null { + return fs.WrapErr(fmt.Errorf("cannot unmarshal %s into Go value for Digest", tok)) + } + } + + if tok == fflib.FFTok_null { + + } else { + + outBuf := fs.Output.Bytes() + + j.Digest = digest.Digest(string(outBuf)) + + } + } + + state = fflib.FFParse_after_value + goto mainparse + handle_Names: /* handler: j.Names type=[]string kind=slice quoted=false*/ diff --git a/vendor/github.com/containers/storage/store.go b/vendor/github.com/containers/storage/store.go index c84bfaf9..de605432 100644 --- a/vendor/github.com/containers/storage/store.go +++ b/vendor/github.com/containers/storage/store.go @@ -370,6 +370,10 @@ type Store interface { // and may have different metadata, big data items, and flags. ImagesByTopLayer(id string) ([]*Image, error) + // ImagesByDigest returns a list of images which contain a big data item + // named ImageDigestBigDataKey whose contents have the specified digest. + ImagesByDigest(d digest.Digest) ([]*Image, error) + // Container returns a specific container. Container(id string) (*Container, error) @@ -430,6 +434,8 @@ type ImageOptions struct { // CreationDate, if not zero, will override the default behavior of marking the image as having been // created when CreateImage() was called, recording CreationDate instead. CreationDate time.Time + // Digest is a hard-coded digest value that we can use to look up the image. It is optional. + Digest digest.Digest } // ContainerOptions is used for passing options to a Store's CreateContainer() method. 
@@ -487,11 +493,6 @@ func GetStore(options StoreOptions) (Store, error) { if err := os.MkdirAll(options.RunRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } - for _, subdir := range []string{} { - if err := os.MkdirAll(filepath.Join(options.RunRoot, subdir), 0700); err != nil && !os.IsExist(err) { - return nil, err - } - } if err := os.MkdirAll(options.GraphRoot, 0700); err != nil && !os.IsExist(err) { return nil, err } @@ -834,11 +835,11 @@ func (s *store) CreateImage(id string, names []string, layer, metadata string, o } creationDate := time.Now().UTC() - if options != nil { + if options != nil && !options.CreationDate.IsZero() { creationDate = options.CreationDate } - return ristore.Create(id, names, layer, metadata, creationDate) + return ristore.Create(id, names, layer, metadata, creationDate, options.Digest) } func (s *store) CreateContainer(id string, names []string, image, layer, metadata string, options *ContainerOptions) (*Container, error) { @@ -1888,10 +1889,16 @@ func (s *store) layersByMappedDigest(m func(ROLayerStore, digest.Digest) ([]Laye } storeLayers, err := m(store, d) if err != nil { - return nil, err + if errors.Cause(err) != ErrLayerUnknown { + return nil, err + } + continue } layers = append(layers, storeLayers...) } + if len(layers) == 0 { + return nil, ErrLayerUnknown + } return layers, nil } @@ -2080,6 +2087,33 @@ func (s *store) ImagesByTopLayer(id string) ([]*Image, error) { return images, nil } +func (s *store) ImagesByDigest(d digest.Digest) ([]*Image, error) { + images := []*Image{} + + istore, err := s.ImageStore() + if err != nil { + return nil, err + } + + istores, err := s.ROImageStores() + if err != nil { + return nil, err + } + for _, store := range append([]ROImageStore{istore}, istores...) { + store.Lock() + defer store.Unlock() + if modified, err := store.Modified(); modified || err != nil { + store.Load() + } + imageList, err := store.ByDigest(d) + if err != nil && err != ErrImageUnknown { + return nil, err + } + images = append(images, imageList...) + } + return images, nil +} + func (s *store) Container(id string) (*Container, error) { rcstore, err := s.ContainerStore() if err != nil { diff --git a/vendor/github.com/containers/storage/vendor.conf b/vendor/github.com/containers/storage/vendor.conf index 9958101e..a30f8feb 100644 --- a/vendor/github.com/containers/storage/vendor.conf +++ b/vendor/github.com/containers/storage/vendor.conf @@ -15,7 +15,7 @@ github.com/pmezard/go-difflib v1.0.0 github.com/sirupsen/logrus v1.0.0 github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/tchap/go-patricia v2.2.6 -github.com/vbatts/tar-split bd4c5d64c3e9297f410025a3b1bd0c58f659e721 +github.com/vbatts/tar-split v0.10.2 golang.org/x/net 7dcfb8076726a3fdd9353b6b8a1f1b6be6811bd6 golang.org/x/sys 07c182904dbd53199946ba614a412c61d3c548f5 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac From 553979e1fc38954eaa3bc5948eae9bdf7e885d36 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 27 Nov 2017 18:46:10 -0500 Subject: [PATCH 06/59] storage: API fixups github.com/containers/image/types.ImageReference.NewImage() can take a *github.com/containers/image/types.SystemContext now, so pass it one if pkg/storage/imageService.CanPull() has one to give it. 
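
As a rough illustration of the fallback applied below (a standalone sketch, not part of this patch; the helper name is made up), the idea is simply to prefer the caller-supplied SourceCtx from the copy options and otherwise hand the library an empty SystemContext:

```
package storage

import (
	"github.com/containers/image/copy"
	"github.com/containers/image/types"
)

// sourceContextFor is an illustrative helper (the patch inlines this logic):
// prefer the SystemContext carried by the copy options, otherwise fall back
// to an empty default so the containers/image calls always get a usable one.
func sourceContextFor(options *copy.Options) *types.SystemContext {
	if options != nil && options.SourceCtx != nil {
		return options.SourceCtx
	}
	return &types.SystemContext{}
}
```
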
Signed-off-by: Nalin Dahyabhai --- pkg/storage/image.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pkg/storage/image.go b/pkg/storage/image.go index 99b15f3a..4ea8eb8c 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -184,7 +184,11 @@ func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool, if err != nil { return false, err } - src, err := image.FromSource(rawSource) + sourceCtx := &types.SystemContext{} + if options.SourceCtx != nil { + sourceCtx = options.SourceCtx + } + src, err := image.FromSource(sourceCtx, rawSource) if err != nil { rawSource.Close() return false, err From f3b7065bd80a53e4c48e8c45c36dcb86717e1578 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Wed, 12 Jul 2017 12:41:38 -0400 Subject: [PATCH 07/59] Return image references from the storage package The image's canonical reference is a name with a digest of the image's manifest, so in imageService.ImageStatus() and imageService.ListImages(), divide the image's name list into tagged and digested values, and if we have names, add canonical versions. In Server.ContainerStatus(), return the image name as it was given to us as the image, and the image digested reference as the image reference. In Server.ListImages(), be sure to only return tagged names in the RepoTags field. In Server.ImageStatus(), also return canonical references in the RepoDigests field. In Server.PullImage(), be sure that we consistently return the same image reference for an image, whether we ended up pulling it or not. Signed-off-by: Nalin Dahyabhai --- pkg/storage/image.go | 157 +++++++++++++++++++++++++++++++------ server/container_create.go | 37 ++------- server/container_list.go | 1 + server/container_status.go | 6 +- server/image_list.go | 12 +-- server/image_pull.go | 10 ++- server/image_status.go | 8 +- server/inspect.go | 10 ++- server/inspect_test.go | 9 ++- test/helpers.bash | 54 +++---------- test/image.bats | 80 ++++++++++++++++--- test/inspect.bats | 6 +- types/types.go | 1 + 13 files changed, 265 insertions(+), 126 deletions(-) diff --git a/pkg/storage/image.go b/pkg/storage/image.go index 4ea8eb8c..f3f2d43d 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -9,6 +9,7 @@ import ( "github.com/containers/image/copy" "github.com/containers/image/docker/reference" "github.com/containers/image/image" + "github.com/containers/image/manifest" "github.com/containers/image/signature" istorage "github.com/containers/image/storage" "github.com/containers/image/transports/alltransports" @@ -25,12 +26,12 @@ var ( // ImageResult wraps a subset of information about an image: its ID, its names, // and the size, if known, or nil if it isn't. type ImageResult struct { - ID string - Names []string - Size *uint64 - // TODO(runcom): this is an hack for https://github.com/kubernetes-incubator/cri-o/pull/1136 - // drop this when we have proper image IDs (as in, image IDs should be just - // the config blog digest which is stable across same images). + ID string + Name string + RepoTags []string + RepoDigests []string + Size *uint64 + Digest digest.Digest ConfigDigest digest.Digest } @@ -47,6 +48,11 @@ type imageService struct { registries []string } +// sizer knows its size. +type sizer interface { + Size() (int64, error) +} + // ImageServer wraps up various CRI-related activities into a reusable // implementation. 
type ImageServer interface { @@ -88,6 +94,66 @@ func (svc *imageService) getRef(name string) (types.ImageReference, error) { return ref, nil } +func sortNamesByType(names []string) (bestName string, tags, digests []string) { + for _, name := range names { + if len(name) > 72 && name[len(name)-72:len(name)-64] == "@sha256:" { + digests = append(digests, name) + } else { + tags = append(tags, name) + } + } + if len(digests) > 0 { + bestName = digests[0] + } + if len(tags) > 0 { + bestName = tags[0] + } + return bestName, tags, digests +} + +func (svc *imageService) makeRepoDigests(knownRepoDigests, tags []string, imageID string) (imageDigest digest.Digest, repoDigests []string) { + // Look up the image's digest. + img, err := svc.store.Image(imageID) + if err != nil { + return "", knownRepoDigests + } + imageDigest = img.Digest + if imageDigest == "" { + imgDigest, err := svc.store.ImageBigDataDigest(imageID, storage.ImageDigestBigDataKey) + if err != nil || imgDigest == "" { + return "", knownRepoDigests + } + imageDigest = imgDigest + } + // If there are no names to convert to canonical references, we're done. + if len(tags) == 0 { + return imageDigest, knownRepoDigests + } + // We only want to supplement what's already explicitly in the list, so keep track of values + // that we already know. + digestMap := make(map[string]struct{}) + repoDigests = knownRepoDigests + for _, repoDigest := range knownRepoDigests { + digestMap[repoDigest] = struct{}{} + } + // For each tagged name, parse the name, and if we can extract a named reference, convert + // it into a canonical reference using the digest and add it to the list. + for _, tag := range tags { + if ref, err2 := reference.ParseAnyReference(tag); err2 == nil { + if name, ok := ref.(reference.Named); ok { + trimmed := reference.TrimNamed(name) + if imageRef, err3 := reference.WithDigest(trimmed, imageDigest); err3 == nil { + if _, ok := digestMap[imageRef.String()]; !ok { + repoDigests = append(repoDigests, imageRef.String()) + digestMap[imageRef.String()] = struct{}{} + } + } + } + } + } + return imageDigest, repoDigests +} + func (svc *imageService) ListImages(systemContext *types.SystemContext, filter string) ([]ImageResult, error) { results := []ImageResult{} if filter != "" { @@ -96,16 +162,26 @@ func (svc *imageService) ListImages(systemContext *types.SystemContext, filter s return nil, err } if image, err := istorage.Transport.GetStoreImage(svc.store, ref); err == nil { - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) img.Close() + if err != nil { + return nil, err + } + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) results = append(results, ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, }) } } else { @@ -118,16 +194,26 @@ func (svc *imageService) ListImages(systemContext *types.SystemContext, filter s if err != nil { return nil, err } - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) img.Close() + if err != nil { + return nil, err + } + name, tags, digests := sortNamesByType(image.Names) + imageDigest, 
repoDigests := svc.makeRepoDigests(digests, tags, image.ID) results = append(results, ImageResult{ - ID: image.ID, - Names: image.Names, - Size: size, + ID: image.ID, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, + Size: size, + Digest: imageDigest, + ConfigDigest: configDigest, }) } } @@ -152,29 +238,54 @@ func (svc *imageService) ImageStatus(systemContext *types.SystemContext, nameOrI return nil, err } - img, err := ref.NewImage(systemContext) + img, err := ref.NewImageSource(systemContext) if err != nil { return nil, err } defer img.Close() size := imageSize(img) + configDigest, err := imageConfigDigest(img, nil) + if err != nil { + return nil, err + } - return &ImageResult{ + name, tags, digests := sortNamesByType(image.Names) + imageDigest, repoDigests := svc.makeRepoDigests(digests, tags, image.ID) + result := ImageResult{ ID: image.ID, - Names: image.Names, + Name: name, + RepoTags: tags, + RepoDigests: repoDigests, Size: size, - ConfigDigest: img.ConfigInfo().Digest, - }, nil + Digest: imageDigest, + ConfigDigest: configDigest, + } + + return &result, nil } -func imageSize(img types.Image) *uint64 { - if sum, err := img.Size(); err == nil { - usum := uint64(sum) - return &usum +func imageSize(img types.ImageSource) *uint64 { + if s, ok := img.(sizer); ok { + if sum, err := s.Size(); err == nil { + usum := uint64(sum) + return &usum + } } return nil } +func imageConfigDigest(img types.ImageSource, instanceDigest *digest.Digest) (digest.Digest, error) { + manifestBytes, manifestType, err := img.GetManifest(instanceDigest) + if err != nil { + return "", err + } + imgManifest, err := manifest.FromBlob(manifestBytes, manifestType) + if err != nil { + return "", err + } + return imgManifest.ConfigInfo().Digest, nil +} + func (svc *imageService) CanPull(imageName string, options *copy.Options) (bool, error) { srcRef, err := svc.prepareReference(imageName, options) if err != nil { diff --git a/server/container_create.go b/server/container_create.go index 78c7e6c9..e23dedb6 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -14,7 +14,6 @@ import ( "strings" "time" - "github.com/docker/distribution/reference" dockermounts "github.com/docker/docker/pkg/mount" "github.com/docker/docker/pkg/stringid" "github.com/docker/docker/pkg/symlink" @@ -963,40 +962,19 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, return nil, err } } - image = images[0] - // Get imageName and imageRef that are requested in container status - imageName := image - status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), image) + // Get imageName and imageRef that are later requested in container status + status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), images[0]) if err != nil { return nil, err } - + imageName := status.Name imageRef := status.ID - // - // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531 - // - //for _, n := range status.Names { - //r, err := reference.ParseNormalizedNamed(n) - //if err != nil { - //return nil, fmt.Errorf("failed to normalize image name for ImageRef: %v", err) - //} - //if digested, isDigested := r.(reference.Canonical); isDigested { - //imageRef = reference.FamiliarString(digested) - //break - //} - //} - for _, n := range status.Names { - r, err := reference.ParseNormalizedNamed(n) - if err != nil { - return nil, fmt.Errorf("failed to normalize image name for Image: %v", err) - } - if tagged, isTagged := r.(reference.Tagged); isTagged { - imageName = 
reference.FamiliarString(tagged) - break - } + if len(status.RepoDigests) > 0 { + imageRef = status.RepoDigests[0] } + specgen.AddAnnotation(annotations.Image, image) specgen.AddAnnotation(annotations.ImageName, imageName) specgen.AddAnnotation(annotations.ImageRef, imageRef) specgen.AddAnnotation(annotations.IP, sb.IP()) @@ -1043,7 +1021,6 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, specgen.AddAnnotation(annotations.TTY, fmt.Sprintf("%v", containerConfig.Tty)) specgen.AddAnnotation(annotations.Stdin, fmt.Sprintf("%v", containerConfig.Stdin)) specgen.AddAnnotation(annotations.StdinOnce, fmt.Sprintf("%v", containerConfig.StdinOnce)) - specgen.AddAnnotation(annotations.Image, image) specgen.AddAnnotation(annotations.ResolvPath, sb.InfraContainer().CrioAnnotations()[annotations.ResolvPath]) created := time.Now() @@ -1079,7 +1056,7 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, attempt := metadata.Attempt containerInfo, err := s.StorageRuntimeServer().CreateContainer(s.ImageContext(), sb.Name(), sb.ID(), - image, image, + image, status.ID, containerName, containerID, metaname, attempt, diff --git a/server/container_list.go b/server/container_list.go index d32eea2d..060fa2af 100644 --- a/server/container_list.go +++ b/server/container_list.go @@ -97,6 +97,7 @@ func (s *Server) ListContainers(ctx context.Context, req *pb.ListContainersReque Metadata: ctr.Metadata(), Annotations: ctr.Annotations(), Image: img, + ImageRef: ctr.ImageRef(), } switch cState.Status { diff --git a/server/container_status.go b/server/container_status.go index f81be56f..3b84468f 100644 --- a/server/container_status.go +++ b/server/container_status.go @@ -3,6 +3,7 @@ package server import ( "time" + "github.com/containers/image/types" "github.com/kubernetes-incubator/cri-o/oci" "github.com/sirupsen/logrus" "golang.org/x/net/context" @@ -38,7 +39,10 @@ func (s *Server) ContainerStatus(ctx context.Context, req *pb.ContainerStatusReq ImageRef: c.ImageRef(), }, } - resp.Status.Image = &pb.ImageSpec{Image: c.ImageName()} + resp.Status.Image = &pb.ImageSpec{Image: c.Image()} + if status, err := s.StorageImageServer().ImageStatus(&types.SystemContext{}, c.ImageRef()); err == nil { + resp.Status.Image.Image = status.Name + } mounts := []*pb.Mount{} for _, cv := range c.Volumes() { diff --git a/server/image_list.go b/server/image_list.go index cbbd0d83..bcdc1036 100644 --- a/server/image_list.go +++ b/server/image_list.go @@ -33,14 +33,16 @@ func (s *Server) ListImages(ctx context.Context, req *pb.ListImagesRequest) (res for _, result := range results { if result.Size != nil { resp.Images = append(resp.Images, &pb.Image{ - Id: result.ID, - RepoTags: result.Names, - Size_: *result.Size, + Id: result.ID, + RepoTags: result.RepoTags, + RepoDigests: result.RepoDigests, + Size_: *result.Size, }) } else { resp.Images = append(resp.Images, &pb.Image{ - Id: result.ID, - RepoTags: result.Names, + Id: result.ID, + RepoTags: result.RepoTags, + RepoDigests: result.RepoDigests, }) } } diff --git a/server/image_pull.go b/server/image_pull.go index 2c7e8b2c..67dfc469 100644 --- a/server/image_pull.go +++ b/server/image_pull.go @@ -104,8 +104,16 @@ func (s *Server) PullImage(ctx context.Context, req *pb.PullImageRequest) (resp if pulled == "" && err != nil { return nil, err } + status, err := s.StorageImageServer().ImageStatus(s.ImageContext(), pulled) + if err != nil { + return nil, err + } + imageRef := status.ID + if len(status.RepoDigests) > 0 { + imageRef = 
status.RepoDigests[0] + } resp = &pb.PullImageResponse{ - ImageRef: pulled, + ImageRef: imageRef, } logrus.Debugf("PullImageResponse: %+v", resp) return resp, nil diff --git a/server/image_status.go b/server/image_status.go index c01a350c..4e2e6a0e 100644 --- a/server/image_status.go +++ b/server/image_status.go @@ -48,10 +48,10 @@ func (s *Server) ImageStatus(ctx context.Context, req *pb.ImageStatusRequest) (r } resp = &pb.ImageStatusResponse{ Image: &pb.Image{ - Id: status.ID, - RepoTags: status.Names, - Size_: *status.Size, - // TODO: https://github.com/kubernetes-incubator/cri-o/issues/531 + Id: status.ID, + RepoTags: status.RepoTags, + RepoDigests: status.RepoDigests, + Size_: *status.Size, }, } logrus.Debugf("ImageStatusResponse: %+v", resp) diff --git a/server/inspect.go b/server/inspect.go index 2aaac841..d1fe6abe 100644 --- a/server/inspect.go +++ b/server/inspect.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" + cimage "github.com/containers/image/types" "github.com/go-zoo/bone" "github.com/kubernetes-incubator/cri-o/lib/sandbox" "github.com/kubernetes-incubator/cri-o/oci" @@ -45,10 +46,17 @@ func (s *Server) getContainerInfo(id string, getContainerFunc func(id string) *o logrus.Debugf("can't find sandbox %s for container %s", ctr.Sandbox(), id) return types.ContainerInfo{}, errSandboxNotFound } + image := ctr.Image() + if s.ContainerServer != nil && s.ContainerServer.StorageImageServer() != nil { + if status, err := s.ContainerServer.StorageImageServer().ImageStatus(&cimage.SystemContext{}, ctr.ImageRef()); err == nil { + image = status.Name + } + } return types.ContainerInfo{ Name: ctr.Name(), Pid: ctrState.Pid, - Image: ctr.ImageName(), + Image: image, + ImageRef: ctr.ImageRef(), CreatedTime: ctrState.Created.UnixNano(), Labels: ctr.Labels(), Annotations: ctr.Annotations(), diff --git a/server/inspect_test.go b/server/inspect_test.go index c970fcd0..7246ef86 100644 --- a/server/inspect_test.go +++ b/server/inspect_test.go @@ -67,7 +67,7 @@ func TestGetContainerInfo(t *testing.T) { "io.kubernetes.test1": "value1", } getContainerFunc := func(id string) *oci.Container { - container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "imageName", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") + container, err := oci.NewContainer("testid", "testname", "", "/container/logs", mockNetNS{}, labels, annotations, annotations, "image", "imageName", "imageRef", &runtime.ContainerMetadata{}, "testsandboxid", false, false, false, false, false, "/root/for/container", created, "SIGKILL") if err != nil { t.Fatal(err) } @@ -101,8 +101,11 @@ func TestGetContainerInfo(t *testing.T) { if ci.Name != "testname" { t.Fatalf("expected name testname, got %s", ci.Name) } - if ci.Image != "imageName" { - t.Fatalf("expected image name imageName, got %s", ci.Image) + if ci.Image != "image" { + t.Fatalf("expected image name image, got %s", ci.Image) + } + if ci.ImageRef != "imageRef" { + t.Fatalf("expected image ref imageRef, got %s", ci.ImageRef) } if ci.Root != "/var/foo/container" { t.Fatalf("expected root to be /var/foo/container, got %s", ci.Root) diff --git a/test/helpers.bash b/test/helpers.bash index c698687a..60f24ce5 100644 --- a/test/helpers.bash +++ b/test/helpers.bash @@ -103,7 +103,7 @@ cp "$CONMON_BINARY" "$TESTDIR/conmon" PATH=$PATH:$TESTDIR -# Make sure we have a copy of the redis:latest image. 
+# Make sure we have a copy of the redis:alpine image. if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then mkdir -p "$ARTIFACTS_PATH"/redis-image if ! "$COPYIMG_BINARY" --import-from=docker://redis:alpine --export-to=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json ; then @@ -113,19 +113,6 @@ if ! [ -d "$ARTIFACTS_PATH"/redis-image ]; then fi fi -# TODO: remove the code below for redis digested image id when -# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete -# as the digested reference will be auto-stored when pulling the tag -# above -if ! [ -d "$ARTIFACTS_PATH"/redis-image-digest ]; then - mkdir -p "$ARTIFACTS_PATH"/redis-image-digest - if ! "$COPYIMG_BINARY" --import-from=docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --export-to=dir:"$ARTIFACTS_PATH"/redis-image-digest --signature-policy="$INTEGRATION_ROOT"/policy.json ; then - echo "Error pulling docker://redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" - rm -fr "$ARTIFACTS_PATH"/redis-image-digest - exit 1 - fi -fi - # Make sure we have a copy of the runcom/stderr-test image. if ! [ -d "$ARTIFACTS_PATH"/stderr-test ]; then mkdir -p "$ARTIFACTS_PATH"/stderr-test @@ -226,13 +213,8 @@ function start_crio() { "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY" fi "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json -# TODO: remove the code below for redis:alpine digested image id when -# https://github.com/kubernetes-incubator/cri-o/issues/531 is complete -# as the digested reference will be auto-stored when pulling the tag -# above - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --import-from=dir:"$ARTIFACTS_PATH"/redis-image-digest --add-name=docker.io/library/redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --add-name=docker.io/library/mrunalp/oom --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/library/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --add-name=docker.io/mrunalp/oom --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image 
--add-name=docker.io/library/busybox:latest --signature-policy="$INTEGRATION_ROOT"/policy.json "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --add-name=docker.io/runcom/stderr-test:latest --signature-policy="$INTEGRATION_ROOT"/policy.json "$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --enable-shared-pid-namespace=${ENABLE_SHARED_PID_NAMESPACE} --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG @@ -252,44 +234,28 @@ function start_crio() { if [ "$status" -ne 0 ] ; then crictl pull redis:alpine fi - REDIS_IMAGEID=$(crictl inspecti redis:alpine | head -1 | sed -e "s/ID: //g") + REDIS_IMAGEID=$(crictl inspecti redis:alpine | grep ^ID: | head -n 1 | sed -e "s/ID: //g") + REDIS_IMAGEREF=$(crictl inspecti redis:alpine | grep ^Digest: | head -n 1 | sed -e "s/Digest: //g") run crictl inspecti mrunalp/oom if [ "$status" -ne 0 ] ; then crictl pull mrunalp/oom fi - # - # - # - # TODO: remove the code below for redis digested image id when - # https://github.com/kubernetes-incubator/cri-o/issues/531 is complete - # as the digested reference will be auto-stored when pulling the tag - # above - # - # - # - REDIS_IMAGEID_DIGESTED="redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" - run crictl inspecti $REDIS_IMAGEID_DIGESTED - if [ "$status" -ne 0 ]; then - crictl pull $REDIS_IMAGEID_DIGESTED - fi - # - # - # - run crictl inspecti runcom/stderr-test + OOM_IMAGEID=$(crictl inspecti mrunalp/oom | grep ^ID: | head -n 1 | sed -e "s/ID: //g") + run crioctl image status --id=runcom/stderr-test if [ "$status" -ne 0 ] ; then crictl pull runcom/stderr-test:latest fi - STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | head -1 | sed -e "s/ID: //g") + STDERR_IMAGEID=$(crictl inspecti runcom/stderr-test | grep ^ID: | head -n 1 | sed -e "s/ID: //g") run crictl inspecti busybox if [ "$status" -ne 0 ] ; then crictl pull busybox:latest fi - BUSYBOX_IMAGEID=$(crictl inspecti busybox | head -1 | sed -e "s/ID: //g") + BUSYBOX_IMAGEID=$(crictl inspecti busybox | grep ^ID: | head -n 1 | sed -e "s/ID: //g") run crictl inspecti mrunalp/image-volume-test if [ "$status" -ne 0 ] ; then crictl pull mrunalp/image-volume-test:latest fi - VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | head -1 | sed -e "s/ID: //g") + VOLUME_IMAGEID=$(crictl inspecti mrunalp/image-volume-test | grep ^ID: | head -n 1 | sed -e "s/ID: //g") } function cleanup_ctrs() { diff --git a/test/image.bats b/test/image.bats index 65a047f6..ac609180 100644 --- a/test/image.bats +++ b/test/image.bats @@ -25,7 +25,7 @@ function teardown() { stop_crio } -@test "container status return image:tag if created by image ID" { +@test "container status when created by image ID" { start_crio run crictl runs "$TESTDATA"/sandbox_config.json @@ -43,16 +43,15 @@ function teardown() { run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "image: redis:alpine" ]] + [[ "$output" =~ "image: 
docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] cleanup_ctrs cleanup_pods stop_crio } -@test "container status return image@digest if created by image ID and digest available" { - skip "depends on https://github.com/kubernetes-incubator/cri-o/issues/531" - +@test "container status when created by image tagged reference" { start_crio run crictl runs "$TESTDATA"/sandbox_config.json @@ -60,9 +59,9 @@ function teardown() { [ "$status" -eq 0 ] pod_id="$output" - sed -e "s/%VALUE%/$REDIS_IMAGEID_DIGESTED/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageid.json + sed -e "s/%VALUE%/redis:alpine/g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imagetag.json - run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageid.json "$TESTDATA"/sandbox_config.json + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imagetag.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] ctr_id="$output" @@ -70,22 +69,51 @@ function teardown() { run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "image_ref: redis@sha256:03789f402b2ecfb98184bf128d180f398f81c63364948ff1454583b02442f73b" ]] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] cleanup_ctrs cleanup_pods stop_crio } -@test "image pull" { +@test "container status when created by image canonical reference" { + start_crio + + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + + sed -e "s|%VALUE%|$REDIS_IMAGEREF|g" "$TESTDATA"/container_config_by_imageid.json > "$TESTDIR"/ctr_by_imageref.json + + run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageref.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + + run crictl inspect "$ctr_id" --output yaml + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "image: docker.io/library/redis:alpine" ]] + [[ "$output" =~ "imageRef: $REDIS_IMAGEREF" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} + +@test "image pull and list" { start_crio "" "" --no-pause-image run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] + run crictl inspecti "$IMAGE" echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "$IMAGE" ]] + cleanup_images stop_crio } @@ -108,7 +136,33 @@ function teardown() { stop_crio } -@test "image pull and list by digest" { +@test "image pull and list by tag and ID" { + start_crio "" "" --no-pause-image + run crictl pull "$IMAGE:go" + echo "$output" + [ "$status" -eq 0 ] + + run crictl images --quiet "$IMAGE:go" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images --quiet @"$imageid" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + + run crictl images --quiet "$imageid" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] + + cleanup_images + stop_crio +} + +@test "image pull and list by digest and ID" { start_crio "" "" --no-pause-image run crictl pull nginx@sha256:33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc echo "$output" @@ -118,18 +172,20 @@ function teardown() { [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] + imageid="$output" run crictl images --quiet nginx@33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] + imageid="$output" - run crictl images --quiet @33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet 
@"$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] - run crictl images --quiet 33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc + run crictl images --quiet "$imageid" [ "$status" -eq 0 ] echo "$output" [ "$output" != "" ] diff --git a/test/inspect.bats b/test/inspect.bats index bfbf82a0..bb7977e5 100644 --- a/test/inspect.bats +++ b/test/inspect.bats @@ -30,13 +30,15 @@ function teardown() { out=`echo -e "GET /containers/$ctr_id HTTP/1.1\r\nHost: crio\r\n" | socat - UNIX-CONNECT:$CRIO_SOCKET` echo "$out" [[ "$out" =~ "\"sandbox\":\"$pod_id\"" ]] - [[ "$out" =~ "\"image\":\"redis:alpine\"" ]] + [[ "$out" =~ "\"image\":\"docker.io/library/redis:alpine\"" ]] + [[ "$out" =~ "\"image_ref\":\"$REDIS_IMAGEREF\"" ]] run crictl inspect --output json "$ctr_id" echo "$output" [ "$status" -eq 0 ] [[ "$output" =~ "\"id\": \"$ctr_id\"" ]] - [[ "$output" =~ "\"image\": \"redis:alpine\"" ]] + [[ "$output" =~ "\"image\": \"docker.io/library/redis:alpine\"" ]] + [[ "$output" =~ "\"imageRef\": \"$REDIS_IMAGEREF\"" ]] run crictl inspects --output json "$pod_id" echo "$output" diff --git a/types/types.go b/types/types.go index 63780143..cedc3abd 100644 --- a/types/types.go +++ b/types/types.go @@ -5,6 +5,7 @@ type ContainerInfo struct { Name string `json:"name"` Pid int `json:"pid"` Image string `json:"image"` + ImageRef string `json:"image_ref"` CreatedTime int64 `json:"created_time"` Labels map[string]string `json:"labels"` Annotations map[string]string `json:"annotations"` From ff7bbb4f0df75b82030cbff173dae5f6ec356638 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 30 Oct 2017 17:18:42 -0400 Subject: [PATCH 08/59] Switch to ImageServer.UntagImage in RemoveImage handler Add an UntagImage() method to pkg/storage/ImageServer, which will check if the passed-in NameOrID is a name. If so, it merely removes that name from the image, removing the image only if it was the last name that the image had. If the NameOrID is an image ID, the image is removed, as RemoveImage() does. Signed-off-by: Nalin Dahyabhai --- pkg/storage/image.go | 60 ++++++++++++++++++++++++++++++++-- server/image_remove.go | 2 +- test/helpers.bash | 10 +++--- test/image_remove.bats | 74 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 138 insertions(+), 8 deletions(-) create mode 100644 test/image_remove.bats diff --git a/pkg/storage/image.go b/pkg/storage/image.go index f3f2d43d..a3916f20 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -3,7 +3,7 @@ package storage import ( "errors" "net" - "path/filepath" + "path" "strings" "github.com/containers/image/copy" @@ -21,6 +21,8 @@ import ( var ( // ErrCannotParseImageID is returned when we try to ResolveNames for an image ID ErrCannotParseImageID = errors.New("cannot parse an image ID") + // ErrImageMultiplyTagged is returned when we try to remove an image that still has multiple names + ErrImageMultiplyTagged = errors.New("image still has multiple names applied") ) // ImageResult wraps a subset of information about an image: its ID, its names, @@ -65,6 +67,9 @@ type ImageServer interface { PrepareImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.Image, error) // PullImage imports an image from the specified location. PullImage(systemContext *types.SystemContext, imageName string, options *copy.Options) (types.ImageReference, error) + // UntagImage removes a name from the specified image, and if it was + // the only name the image had, removes the image. 
+ UntagImage(systemContext *types.SystemContext, imageName string) error // RemoveImage deletes the specified image. RemoveImage(systemContext *types.SystemContext, imageName string) error // GetStore returns the reference to the storage library Store which @@ -389,6 +394,57 @@ func (svc *imageService) PullImage(systemContext *types.SystemContext, imageName return destRef, nil } +func (svc *imageService) UntagImage(systemContext *types.SystemContext, nameOrID string) error { + ref, err := alltransports.ParseImageName(nameOrID) + if err != nil { + ref2, err2 := istorage.Transport.ParseStoreReference(svc.store, "@"+nameOrID) + if err2 != nil { + ref3, err3 := istorage.Transport.ParseStoreReference(svc.store, nameOrID) + if err3 != nil { + return err + } + ref2 = ref3 + } + ref = ref2 + } + + img, err := istorage.Transport.GetStoreImage(svc.store, ref) + if err != nil { + return err + } + + if !strings.HasPrefix(img.ID, nameOrID) { + namedRef, err := svc.prepareReference(nameOrID, ©.Options{}) + if err != nil { + return err + } + + name := nameOrID + if namedRef.DockerReference() != nil { + name = namedRef.DockerReference().Name() + if tagged, ok := namedRef.DockerReference().(reference.NamedTagged); ok { + name = name + ":" + tagged.Tag() + } + if canonical, ok := namedRef.DockerReference().(reference.Canonical); ok { + name = name + "@" + canonical.Digest().String() + } + } + + prunedNames := make([]string, 0, len(img.Names)) + for _, imgName := range img.Names { + if imgName != name && imgName != nameOrID { + prunedNames = append(prunedNames, imgName) + } + } + + if len(prunedNames) > 0 { + return svc.store.SetNames(img.ID, prunedNames) + } + } + + return ref.DeleteImage(systemContext) +} + func (svc *imageService) RemoveImage(systemContext *types.SystemContext, nameOrID string) error { ref, err := alltransports.ParseImageName(nameOrID) if err != nil { @@ -483,7 +539,7 @@ func (svc *imageService) ResolveNames(imageName string) ([]string, error) { if r == "docker.io" && !strings.ContainsRune(remainder, '/') { rem = "library/" + rem } - images = append(images, filepath.Join(r, rem)) + images = append(images, path.Join(r, rem)) } return images, nil } diff --git a/server/image_remove.go b/server/image_remove.go index da744490..d1f1e884 100644 --- a/server/image_remove.go +++ b/server/image_remove.go @@ -40,7 +40,7 @@ func (s *Server) RemoveImage(ctx context.Context, req *pb.RemoveImageRequest) (r } } for _, img := range images { - err = s.StorageImageServer().RemoveImage(s.ImageContext(), img) + err = s.StorageImageServer().UntagImage(s.ImageContext(), img) if err != nil { logrus.Debugf("error deleting image %s: %v", img, err) continue diff --git a/test/helpers.bash b/test/helpers.bash index 60f24ce5..a0c715e1 100644 --- a/test/helpers.bash +++ b/test/helpers.bash @@ -212,11 +212,11 @@ function start_crio() { if ! 
[ "$3" = "--no-pause-image" ] ; then "$BIN2IMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --source-binary "$PAUSE_BINARY" fi - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --add-name=docker.io/library/redis:alpine --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/oom --import-from=dir:"$ARTIFACTS_PATH"/oom-image --add-name=docker.io/mrunalp/oom --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=mrunalp/image-volume-test --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --add-name=docker.io/mrunalp/image-volume-test --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --add-name=docker.io/library/busybox:latest --signature-policy="$INTEGRATION_ROOT"/policy.json - "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --add-name=docker.io/runcom/stderr-test:latest --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/redis:alpine --import-from=dir:"$ARTIFACTS_PATH"/redis-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/oom:latest --import-from=dir:"$ARTIFACTS_PATH"/oom-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/mrunalp/image-volume-test:latest --import-from=dir:"$ARTIFACTS_PATH"/image-volume-test-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/library/busybox:latest --import-from=dir:"$ARTIFACTS_PATH"/busybox-image --signature-policy="$INTEGRATION_ROOT"/policy.json + "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name=docker.io/runcom/stderr-test:latest --import-from=dir:"$ARTIFACTS_PATH"/stderr-test --signature-policy="$INTEGRATION_ROOT"/policy.json "$CRIO_BINARY" ${DEFAULT_MOUNTS_OPTS} ${HOOKS_OPTS} --conmon "$CONMON_BINARY" --listen "$CRIO_SOCKET" --cgroup-manager "$CGROUP_MANAGER" --registry "docker.io" --runtime "$RUNTIME_BINARY" --root "$TESTDIR/crio" --runroot "$TESTDIR/crio-run" $STORAGE_OPTIONS --seccomp-profile "$seccomp" --apparmor-profile "$apparmor" --cni-config-dir "$CRIO_CNI_CONFIG" --cni-plugin-dir "$CRIO_CNI_PLUGIN" --signature-policy "$INTEGRATION_ROOT"/policy.json --image-volumes "$IMAGE_VOLUMES" --pids-limit "$PIDS_LIMIT" --enable-shared-pid-namespace=${ENABLE_SHARED_PID_NAMESPACE} --log-size-max "$LOG_SIZE_MAX_LIMIT" --config /dev/null config >$CRIO_CONFIG # Prepare the CNI configuration files, we're running with non host networking by default diff --git a/test/image_remove.bats b/test/image_remove.bats new file mode 100644 index 00000000..a2f6f2e8 --- /dev/null +++ 
b/test/image_remove.bats @@ -0,0 +1,74 @@ +#!/usr/bin/env bats + +load helpers + +IMAGE=docker.io/kubernetes/pause + +function teardown() { + cleanup_test +} + +@test "image remove with multiple names, by name" { + start_crio "" "" --no-pause-image + # Pull the image, giving it one name. + run crioctl image pull "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + # Add a second name to the image. + run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json + echo "$output" + [ "$status" -eq 0 ] + # Get the list of image names and IDs. + run crioctl image list + echo "$output" + [ "$status" -eq 0 ] + [ "$output" != "" ] + # Cycle through each name, removing it by name. The image that we assigned a second + # name to should still be around when we get to removing its second name. + grep ^Tag: <<< "$output" | while read -r header tag ; do + run crioctl image remove --id "$tag" + echo "$output" + [ "$status" -eq 0 ] + done + # List all images and their names. There should be none now. + run crioctl image list --quiet + echo "$output" + [ "$status" -eq 0 ] + [ "$output" = "" ] + printf '%s\n' "$output" | while IFS= read -r id; do + echo "$id" + done + # All done. + cleanup_images + stop_crio +} + +@test "image remove with multiple names, by ID" { + start_crio "" "" --no-pause-image + # Pull the image, giving it one name. + run crioctl image pull "$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + # Add a second name to the image. + run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json + echo "$output" + [ "$status" -eq 0 ] + # Get the image ID of the image we just saved. + run crioctl image status --id="$IMAGE" + echo "$output" + [ "$status" -eq 0 ] + [ "$output" != "" ] + # Try to remove the image using its ID. That should succeed. + grep ^ID: <<< "$output" | while read -r header id ; do + run crioctl image remove --id "$id" + echo "$output" + [ "$status" -ne 0 ] + done + # The image should be gone now. + run crioctl image status --id="$IMAGE" + echo "$output" + [ "$status" -ne 0 ] + # All done. + cleanup_images + stop_crio +} From 5ea050fc1211e6f42956f1cb5fe4c62e3d97e06e Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 11 Dec 2017 16:02:54 -0500 Subject: [PATCH 09/59] Handle truncated IDs in imageService.ResolveNames() Have ResolveNames() check if the value that it's been given is a truncated version of the ID of a locally-available image, and if it is, return the value as it was given. Signed-off-by: Nalin Dahyabhai --- pkg/storage/image.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/pkg/storage/image.go b/pkg/storage/image.go index a3916f20..5994d952 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -18,6 +18,10 @@ import ( digest "github.com/opencontainers/go-digest" ) +const ( + minimumTruncatedIDLength = 3 +) + var ( // ErrCannotParseImageID is returned when we try to ResolveNames for an image ID ErrCannotParseImageID = errors.New("cannot parse an image ID") @@ -512,6 +516,14 @@ func splitDockerDomain(name string) (domain, remainder string) { } func (svc *imageService) ResolveNames(imageName string) ([]string, error) { + // _Maybe_ it's a truncated image ID. Don't prepend a registry name, then. 
+ if len(imageName) >= minimumTruncatedIDLength && svc.store != nil { + if img, err := svc.store.Image(imageName); err == nil && img != nil && strings.HasPrefix(img.ID, imageName) { + // It's a truncated version of the ID of an image that's present in local storage; + // we need to expand it. + return []string{img.ID}, nil + } + } // This to prevent any image ID to go through this routine _, err := reference.ParseNormalizedNamed(imageName) if err != nil { From 6a456d15023bd5193e8b1ab20cd9a833d254afd0 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Tue, 14 Nov 2017 09:54:53 -0500 Subject: [PATCH 10/59] Use crictl instead of crioctl in image integration tests Use crictl instead of crioctl in some of the integration tests that exercise image handling. Signed-off-by: Nalin Dahyabhai --- test/image.bats | 27 +++++++++++++++++++-------- test/image_remove.bats | 25 +++++++++++++------------ test/image_volume.bats | 2 +- 3 files changed, 33 insertions(+), 21 deletions(-) diff --git a/test/image.bats b/test/image.bats index ac609180..52336641 100644 --- a/test/image.bats +++ b/test/image.bats @@ -20,6 +20,10 @@ function teardown() { run crictl create "$pod_id" "$TESTDIR"/ctr_by_imageid.json "$TESTDATA"/sandbox_config.json echo "$output" [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] cleanup_ctrs cleanup_pods stop_crio @@ -92,6 +96,10 @@ function teardown() { [ "$status" -eq 0 ] ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + run crictl inspect "$ctr_id" --output yaml echo "$output" [ "$status" -eq 0 ] @@ -109,11 +117,20 @@ function teardown() { echo "$output" [ "$status" -eq 0 ] - run crictl inspecti "$IMAGE" + run crictl images --quiet "$IMAGE" + [ "$status" -eq 0 ] echo "$output" + [ "$output" != "" ] + imageid="$output" + + run crictl images @"$imageid" [ "$status" -eq 0 ] [[ "$output" =~ "$IMAGE" ]] + run crictl images --quiet "$imageid" + [ "$status" -eq 0 ] + echo "$output" + [ "$output" != "" ] cleanup_images stop_crio } @@ -174,12 +191,6 @@ function teardown() { [ "$output" != "" ] imageid="$output" - run crictl images --quiet nginx@33eb1ed1e802d4f71e52421f56af028cdf12bb3bfff5affeaf5bf0e328ffa1bc - [ "$status" -eq 0 ] - echo "$output" - [ "$output" != "" ] - imageid="$output" - run crictl images --quiet @"$imageid" [ "$status" -eq 0 ] echo "$output" @@ -254,7 +265,7 @@ function teardown() { [ "$status" -eq 0 ] [ "$output" != "" ] printf '%s\n' "$output" | while IFS= read -r id; do - run crictl inspecti "$id" + run crictl images -v "$id" echo "$output" [ "$status" -eq 0 ] [ "$output" != "" ] diff --git a/test/image_remove.bats b/test/image_remove.bats index a2f6f2e8..54b06c05 100644 --- a/test/image_remove.bats +++ b/test/image_remove.bats @@ -11,7 +11,7 @@ function teardown() { @test "image remove with multiple names, by name" { start_crio "" "" --no-pause-image # Pull the image, giving it one name. - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] # Add a second name to the image. @@ -19,19 +19,19 @@ function teardown() { echo "$output" [ "$status" -eq 0 ] # Get the list of image names and IDs. - run crioctl image list + run crictl images -v echo "$output" [ "$status" -eq 0 ] [ "$output" != "" ] # Cycle through each name, removing it by name. The image that we assigned a second # name to should still be around when we get to removing its second name. 
- grep ^Tag: <<< "$output" | while read -r header tag ; do - run crioctl image remove --id "$tag" + grep ^RepoTags: <<< "$output" | while read -r header tag ignored ; do + run crictl rmi "$tag" echo "$output" [ "$status" -eq 0 ] done # List all images and their names. There should be none now. - run crioctl image list --quiet + run crictl images --quiet echo "$output" [ "$status" -eq 0 ] [ "$output" = "" ] @@ -46,28 +46,29 @@ function teardown() { @test "image remove with multiple names, by ID" { start_crio "" "" --no-pause-image # Pull the image, giving it one name. - run crioctl image pull "$IMAGE" + run crictl pull "$IMAGE" echo "$output" [ "$status" -eq 0 ] # Add a second name to the image. run "$COPYIMG_BINARY" --root "$TESTDIR/crio" $STORAGE_OPTIONS --runroot "$TESTDIR/crio-run" --image-name="$IMAGE":latest --add-name="$IMAGE":othertag --signature-policy="$INTEGRATION_ROOT"/policy.json echo "$output" [ "$status" -eq 0 ] - # Get the image ID of the image we just saved. - run crioctl image status --id="$IMAGE" + # Get the list of the image's names and its ID. + run crictl images -v "$IMAGE":latest echo "$output" [ "$status" -eq 0 ] [ "$output" != "" ] # Try to remove the image using its ID. That should succeed. grep ^ID: <<< "$output" | while read -r header id ; do - run crioctl image remove --id "$id" + run crictl rmi "$id" echo "$output" - [ "$status" -ne 0 ] + [ "$status" -eq 0 ] done # The image should be gone now. - run crioctl image status --id="$IMAGE" + run crictl images -v "$IMAGE" echo "$output" - [ "$status" -ne 0 ] + [ "$status" -eq 0 ] + [ "$output" = "" ] # All done. cleanup_images stop_crio diff --git a/test/image_volume.bats b/test/image_volume.bats index 1b2d967b..f5b39401 100644 --- a/test/image_volume.bats +++ b/test/image_volume.bats @@ -55,7 +55,7 @@ function teardown() { run crictl exec --sync "$ctr_id" touch /imagevolume/test_file echo "$output" [ "$status" -eq 0 ] - + [ "$output" = "" ] run crictl stops "$pod_id" echo "$output" [ "$status" -eq 0 ] From 893aa4e8c70c5f5fc3e36f760a00949d84d9cb20 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Fri, 1 Dec 2017 11:04:51 -0500 Subject: [PATCH 11/59] Be more diligent about cleaning up failed-to-create containers If server/Server.createSandboxContainer() fails after calling server/Server.StorageRuntimeServer().CreateContainer(), cleanup logic in server/Server.CreateContainer() won't try to clean it up, but we still need to clean up the on-disk container and its layer. 
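
The fix registers a deferred cleanup right after the storage-side container is created, so any later failure in the same function tears it down again. A minimal, self-contained sketch of that pattern (not CRI-O code; allocate, configure and release are hypothetical stand-ins):

```
package main

import "log"

// createThing shows the idiom used in the hunk below: once the resource
// exists, defer a cleanup that only fires when the function is about to
// return an error.
func createThing() (err error) {
	res := allocate()
	defer func() {
		if err != nil {
			if err2 := release(res); err2 != nil {
				log.Printf("failed to clean up %s: %v", res, err2)
			}
		}
	}()
	// Any failure from here on triggers the deferred cleanup automatically.
	if err = configure(res); err != nil {
		return err
	}
	return nil
}

func allocate() string          { return "thing-1" }
func configure(id string) error { return nil }
func release(id string) error   { return nil }

func main() { _ = createThing() }
```

For the deferred check to fire, the failure must end up in the error variable the closure reads, which is why the second hunk below also changes the "empty image config" path to assign the error before returning it.
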
Signed-off-by: Nalin Dahyabhai --- server/container_create.go | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/server/container_create.go b/server/container_create.go index e23dedb6..080a1af7 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -1065,6 +1065,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if err != nil { return nil, err } + defer func() { + if err != nil { + err2 := s.StorageRuntimeServer().DeleteContainer(containerInfo.ID) + if err2 != nil { + logrus.Warnf("Failed to cleanup container directory: %v", err2) + } + } + }() mountPoint, err := s.StorageRuntimeServer().StartContainer(containerID) if err != nil { @@ -1074,7 +1082,8 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, containerImageConfig := containerInfo.Config if containerImageConfig == nil { - return nil, fmt.Errorf("empty image config for %s", image) + err = fmt.Errorf("empty image config for %s", image) + return nil, err } if containerImageConfig.Config.StopSignal != "" { From 492f758176ba001384c5f6de3058d452774949a1 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 4 Dec 2017 10:43:07 -0500 Subject: [PATCH 12/59] Playbooks: don't assume the default network is eth0 Replace instances of "ansible_eth0.ipv4.address" with "ansible_default_ipv4.address" in the integration test playbook, so that we can run tests without depending on the name of the primary network interface being "eth0". Signed-off-by: Nalin Dahyabhai --- contrib/test/integration/build/kubernetes.yml | 6 +++--- contrib/test/integration/e2e.yml | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/contrib/test/integration/build/kubernetes.yml b/contrib/test/integration/build/kubernetes.yml index 3a18321c..f7774627 100644 --- a/contrib/test/integration/build/kubernetes.yml +++ b/contrib/test/integration/build/kubernetes.yml @@ -43,9 +43,9 @@ export CONTAINER_RUNTIME_ENDPOINT='{{ crio_socket }} --runtime-request-timeout=5m' export ALLOW_SECURITY_CONTEXT="," export ALLOW_PRIVILEGED=1 - export DNS_SERVER_IP={{ ansible_eth0.ipv4.address }} - export API_HOST={{ ansible_eth0.ipv4.address }} - export API_HOST_IP={{ ansible_eth0.ipv4.address }} + export DNS_SERVER_IP={{ ansible_default_ipv4.address }} + export API_HOST={{ ansible_default_ipv4.address }} + export API_HOST_IP={{ ansible_default_ipv4.address }} export KUBE_ENABLE_CLUSTER_DNS=true ./hack/local-up-cluster.sh mode: "u=rwx,g=rwx,o=x" diff --git a/contrib/test/integration/e2e.yml b/contrib/test/integration/e2e.yml index 5c4d656e..ce456b4c 100644 --- a/contrib/test/integration/e2e.yml +++ b/contrib/test/integration/e2e.yml @@ -10,7 +10,7 @@ - name: update the server address for the custom cluster lineinfile: dest: /usr/local/bin/createcluster.sh - line: "export {{ item }}={{ ansible_eth0.ipv4.address }}" + line: "export {{ item }}={{ ansible_default_ipv4.address }}" regexp: "^export {{ item }}=" state: present with_items: From 0ab8c507f41f2c3c8a25f469744a46d23b069013 Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 4 Dec 2017 10:44:29 -0500 Subject: [PATCH 13/59] Install python-rhsm-certificates, handle python-boto Add python-rhsm-certificates to the list of packages that we require, so that the required certificates are available for the pull-image-with-signature tests. Add per-distribution package install tasks so that we install either python2-boto or python-boto, depending on whether we're running on Fedora or RHEL/CentOS, respectively. 
Signed-off-by: Nalin Dahyabhai --- contrib/test/integration/system.yml | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/contrib/test/integration/system.yml b/contrib/test/integration/system.yml index d07ae0c8..1c43f431 100644 --- a/contrib/test/integration/system.yml +++ b/contrib/test/integration/system.yml @@ -41,9 +41,9 @@ - ostree-devel - pkgconfig - python - - python2-boto - python2-crypto - python-devel + - python-rhsm-certificates - python-virtualenv - PyYAML - redhat-rpm-config @@ -57,6 +57,22 @@ async: 600 poll: 10 +- name: Add python2-boto for Fedora + package: + name: "{{ item }}" + state: present + with_items: + - python2-boto + when: ansible_distribution in ['Fedora'] + +- name: Add python-boto for RHEL and CentOS + package: + name: "{{ item }}" + state: present + with_items: + - python-boto + when: ansible_distribution in ['RedHat', 'CentOS'] + - name: Add Btrfs for Fedora package: name: "{{ item }}" From 72442d095703a4e84b41a7410b1b044ab6a02fce Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 4 Dec 2017 11:46:49 -0500 Subject: [PATCH 14/59] Don't skip a critest that we now pass We can pass the "listImage should get exactly 2 repoTags in the result image" test now, so we no longer need to skip it. Signed-off-by: Nalin Dahyabhai --- contrib/test/integration/critest.yml | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/contrib/test/integration/critest.yml b/contrib/test/integration/critest.yml index d3a38103..377ab59d 100644 --- a/contrib/test/integration/critest.yml +++ b/contrib/test/integration/critest.yml @@ -16,24 +16,21 @@ - name: Add masquerade for localhost command: iptables -t nat -I POSTROUTING -s 127.0.0.1 ! -d 127.0.0.1 -j MASQUERADE - # TODO(runcom): enable skipped tests once we fix them (image list related) - # https://github.com/kubernetes-incubator/cri-o/issues/1048 - name: run critest validation - shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock -s 'listImage should get exactly 2 repoTags in the result image' v" + shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock v" args: chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" async: 5400 poll: 30 when: ansible_distribution not in ['RedHat', 'CentOS'] - # XXX: RHEL has an additional test which fails beacuse of selinux but disabling + # XXX: RHEL has an additional test which fails because of selinux but disabling # it doesn't solve the issue. 
- # TODO(runcom): enable skipped tests once we fix them (image list related and selinux) - # https://github.com/kubernetes-incubator/cri-o/issues/1048 + # TODO(runcom): enable skipped tests once we fix them (selinux) # https://bugzilla.redhat.com/show_bug.cgi?id=1414236 # https://access.redhat.com/solutions/2897781 - name: run critest validation - shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock -s 'listImage should get exactly 2 repoTags in the result image|should not allow privilege escalation when true' v" + shell: "critest -c --runtime-endpoint /var/run/crio/crio.sock --image-endpoint /var/run/crio/crio.sock -s 'should not allow privilege escalation when true' v" args: chdir: "{{ ansible_env.GOPATH }}/src/github.com/kubernetes-incubator/cri-o" async: 5400 From fa90249c5964d8735b2a9e8d70ce7da735daf01e Mon Sep 17 00:00:00 2001 From: Nalin Dahyabhai Date: Mon, 11 Dec 2017 16:41:28 -0500 Subject: [PATCH 15/59] Playbooks: install the atomic-registries package Install atomic-registries to get a /etc/containers/registries.conf file, so that we can resolve image names that don't include domain portions. Signed-off-by: Nalin Dahyabhai --- contrib/test/integration/system.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/contrib/test/integration/system.yml b/contrib/test/integration/system.yml index 1c43f431..453551fa 100644 --- a/contrib/test/integration/system.yml +++ b/contrib/test/integration/system.yml @@ -5,6 +5,7 @@ name: "{{ item }}" state: present with_items: + - atomic-registries - container-selinux - curl - device-mapper-devel From ffed9155542ddf07e60b60511799bfc3e308b826 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 1 Dec 2017 15:37:38 +0100 Subject: [PATCH 16/59] contrib: import system containers Signed-off-by: Antonio Murdaca --- contrib/system_containers/centos/Dockerfile | 29 ++ contrib/system_containers/centos/README.md | 57 +++ contrib/system_containers/centos/cccp.yml | 41 ++ .../centos/config.json.template | 427 +++++++++++++++++ .../system_containers/centos/manifest.json | 10 + contrib/system_containers/centos/run.sh | 8 + .../system_containers/centos/service.template | 20 + .../system_containers/centos/set_mounts.sh | 7 + .../centos/tmpfiles.template | 5 + contrib/system_containers/fedora/Dockerfile | 30 ++ contrib/system_containers/fedora/README.md | 53 +++ .../fedora/config.json.template | 432 ++++++++++++++++++ .../system_containers/fedora/manifest.json | 10 + contrib/system_containers/fedora/run.sh | 8 + .../system_containers/fedora/service.template | 20 + .../system_containers/fedora/set_mounts.sh | 7 + .../fedora/tmpfiles.template | 5 + contrib/system_containers/rhel/Dockerfile | 41 ++ .../rhel/config.json.template | 422 +++++++++++++++++ contrib/system_containers/rhel/help.md | 37 ++ contrib/system_containers/rhel/manifest.json | 10 + contrib/system_containers/rhel/run.sh | 8 + .../system_containers/rhel/service.template | 20 + contrib/system_containers/rhel/set_mounts.sh | 7 + .../system_containers/rhel/tmpfiles.template | 5 + 25 files changed, 1719 insertions(+) create mode 100644 contrib/system_containers/centos/Dockerfile create mode 100644 contrib/system_containers/centos/README.md create mode 100644 contrib/system_containers/centos/cccp.yml create mode 100644 contrib/system_containers/centos/config.json.template create mode 100644 contrib/system_containers/centos/manifest.json create mode 100755 contrib/system_containers/centos/run.sh create mode 100644 
contrib/system_containers/centos/service.template create mode 100755 contrib/system_containers/centos/set_mounts.sh create mode 100644 contrib/system_containers/centos/tmpfiles.template create mode 100644 contrib/system_containers/fedora/Dockerfile create mode 100644 contrib/system_containers/fedora/README.md create mode 100644 contrib/system_containers/fedora/config.json.template create mode 100644 contrib/system_containers/fedora/manifest.json create mode 100755 contrib/system_containers/fedora/run.sh create mode 100644 contrib/system_containers/fedora/service.template create mode 100755 contrib/system_containers/fedora/set_mounts.sh create mode 100644 contrib/system_containers/fedora/tmpfiles.template create mode 100644 contrib/system_containers/rhel/Dockerfile create mode 100644 contrib/system_containers/rhel/config.json.template create mode 100644 contrib/system_containers/rhel/help.md create mode 100644 contrib/system_containers/rhel/manifest.json create mode 100755 contrib/system_containers/rhel/run.sh create mode 100644 contrib/system_containers/rhel/service.template create mode 100755 contrib/system_containers/rhel/set_mounts.sh create mode 100644 contrib/system_containers/rhel/tmpfiles.template diff --git a/contrib/system_containers/centos/Dockerfile b/contrib/system_containers/centos/Dockerfile new file mode 100644 index 00000000..0797fb14 --- /dev/null +++ b/contrib/system_containers/centos/Dockerfile @@ -0,0 +1,29 @@ +FROM centos + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." \ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +RUN yum-config-manager --nogpgcheck --add-repo https://cbs.centos.org/repos/virt7-container-common-candidate/x86_64/os/ && \ + yum install --disablerepo=extras --nogpgcheck --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/centos/README.md b/contrib/system_containers/centos/README.md new file mode 100644 index 00000000..428bc6ff --- /dev/null +++ b/contrib/system_containers/centos/README.md @@ -0,0 +1,57 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. 
This flag will tell the atomic CLI to fall back to copying files to the +host instead. + +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` + +## Binary version + +You can find the image automatically built as: registry.centos.org/projectatomic/cri-o:latest diff --git a/contrib/system_containers/centos/cccp.yml b/contrib/system_containers/centos/cccp.yml new file mode 100644 index 00000000..ec4dab74 --- /dev/null +++ b/contrib/system_containers/centos/cccp.yml @@ -0,0 +1,41 @@ +# This is for the purpose of building containers on the CentOS Community Container +# Pipeline. The containers are built, tested and delivered to registry.centos.org and +# lifecycled as well. A corresponding entry must exist in the container index itself, +# located at https://github.com/CentOS/container-index/tree/master/index.d +# You can know more at the following links: +# * https://github.com/CentOS/container-pipeline-service/blob/master/README.md +# * https://github.com/CentOS/container-index/blob/master/README.rst +# * https://wiki.centos.org/ContainerPipeline + +# This will be part of the name of the container. It should match the job-id in index entry +job-id: cri-o + +#the following are optional, can be left blank +#defaults, where applicable are filled in +#nulecule-file : nulecule + +# This flag tells the container pipeline to skip user defined tests on their container +test-skip : True + +# This is path of the script that initiates the user defined tests. It must be able to +# return an exit code. +test-script : null + +# This is the path of custom build script. +build-script : null + +# This is the path of the custom delivery script +delivery-script : null + +# This flag tells the pipeline to deliver this container to docker hub. 
+docker-index : True + +# This flag can be used to enable or disable the custom delivery +custom-delivery : False + +# This flag can be used to enable or disable delivery of container to local registry +local-delivery : True + +Upstreams : + - ref : + url : diff --git a/contrib/system_containers/centos/config.json.template b/contrib/system_containers/centos/config.json.template new file mode 100644 index 00000000..785383d4 --- /dev/null +++ b/contrib/system_containers/centos/config.json.template @@ -0,0 +1,427 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + 
"CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + 
"type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/centos/manifest.json b/contrib/system_containers/centos/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/centos/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/centos/run.sh b/contrib/system_containers/centos/run.sh new file mode 100755 index 00000000..5621a7ba --- /dev/null +++ b/contrib/system_containers/centos/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/centos/service.template b/contrib/system_containers/centos/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/centos/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/centos/set_mounts.sh b/contrib/system_containers/centos/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/centos/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/centos/tmpfiles.template b/contrib/system_containers/centos/tmpfiles.template new file mode 100644 index 00000000..015919c4 --- /dev/null +++ b/contrib/system_containers/centos/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/${NAME} - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/fedora/Dockerfile b/contrib/system_containers/fedora/Dockerfile new file mode 100644 index 00000000..da12c6f0 --- /dev/null +++ b/contrib/system_containers/fedora/Dockerfile @@ -0,0 +1,30 @@ +FROM registry.fedoraproject.org/fedora:27 + +ENV VERSION=0 RELEASE=1 ARCH=x86_64 +LABEL com.redhat.component="cri-o" \ + name="$FGC/cri-o" \ + version="$VERSION" \ + release="$RELEASE.$DISTTAG" \ + architecture="$ARCH" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + summary="The cri-o daemon as a system container." 
\ + maintainer="Yu Qi Zhang " \ + atomic.type="system" + +COPY README.md / + +RUN dnf install --enablerepo=updates-testing --setopt=tsflags=nodocs -y iptables cri-o socat iproute runc && \ + rpm -V iptables cri-o iproute runc && \ + dnf clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +RUN sed -i '/storage_option =/s/.*/&\n"overlay.override_kernel_check=1",/' /exports/hostfs/etc/crio/crio.conf + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] diff --git a/contrib/system_containers/fedora/README.md b/contrib/system_containers/fedora/README.md new file mode 100644 index 00000000..6de39964 --- /dev/null +++ b/contrib/system_containers/fedora/README.md @@ -0,0 +1,53 @@ +# cri-o + +This is the cri-o daemon as a system container. + +## Building the image from source: + +``` +# git clone https://github.com/projectatomic/atomic-system-containers +# cd atomic-system-containers/cri-o +# docker build -t crio . +``` + +## Running the system container, with the atomic CLI: + +Pull from registry into ostree: + +``` +# atomic pull --storage ostree $REGISTRY/crio +``` + +Or alternatively, pull from local docker: + +``` +# atomic pull --storage ostree docker:crio:latest +``` + +Install the container: + +Currently we recommend using --system-package=no to avoid having rpmbuild create an rpm file +during installation. This flag will tell the atomic CLI to fall back to copying files to the +host instead. + +``` +# atomic install --system --system-package=no --name=crio ($REGISTRY)/crio +``` + +Start as a systemd service: + +``` +# systemctl start crio +``` + +Stopping the service + +``` +# systemctl stop crio +``` + +Removing the container + +``` +# atomic uninstall crio +``` diff --git a/contrib/system_containers/fedora/config.json.template b/contrib/system_containers/fedora/config.json.template new file mode 100644 index 00000000..0642fbc1 --- /dev/null +++ b/contrib/system_containers/fedora/config.json.template @@ -0,0 +1,432 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + 
"CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND", + "CAP_AUDIT_READ" + ] + }, + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [ + { + "type": "mount" + } + ], + "resources": { + "devices": [ + { + "access": "rwm", + "allow": true + } + ] + }, + "rootfsPropagation": "private" + }, + "mounts": [ + { + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + 
"destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": "${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/fedora/manifest.json b/contrib/system_containers/fedora/manifest.json new file mode 100644 index 00000000..38f4dc87 --- /dev/null +++ b/contrib/system_containers/fedora/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL" : "info", + "OPT_CNI" : "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE" : "/var/lib/containers/storage", + "VAR_LIB_ORIGIN" : "/var/lib/origin", + "VAR_LIB_KUBE" : "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/fedora/run.sh b/contrib/system_containers/fedora/run.sh new file mode 100755 index 00000000..5621a7ba --- /dev/null +++ b/contrib/system_containers/fedora/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/fedora/service.template b/contrib/system_containers/fedora/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/fedora/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR 
+RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/fedora/set_mounts.sh b/contrib/system_containers/fedora/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/fedora/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt /var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/fedora/tmpfiles.template b/contrib/system_containers/fedora/tmpfiles.template new file mode 100644 index 00000000..015919c4 --- /dev/null +++ b/contrib/system_containers/fedora/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/${NAME} - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - diff --git a/contrib/system_containers/rhel/Dockerfile b/contrib/system_containers/rhel/Dockerfile new file mode 100644 index 00000000..3c113fda --- /dev/null +++ b/contrib/system_containers/rhel/Dockerfile @@ -0,0 +1,41 @@ +#oit## This file is managed by the OpenShift Image Tool +#oit## by the OpenShift Continuous Delivery team. +#oit## +#oit## Any yum repos listed in this file will effectively be ignored during CD builds. +#oit## Yum repos must be enabled in the oit configuration files. +#oit## Some aspects of this file may be managed programmatically. For example, the image name, labels (version, +#oit## release, and other), and the base FROM. Changes made directly in distgit may be lost during the next +#oit## reconciliation. +#oit## +FROM rhel7:7-released + +RUN \ + yum install --setopt=tsflags=nodocs -y socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + rpm -V socat iptables cri-o iproute runc skopeo-containers container-selinux && \ + yum clean all && \ + mkdir -p /exports/hostfs/etc/crio /exports/hostfs/opt/cni/bin/ /exports/hostfs/var/lib/containers/storage/ && \ + cp /etc/crio/* /exports/hostfs/etc/crio && \ + if test -e /usr/libexec/cni; then cp -Lr /usr/libexec/cni/* /exports/hostfs/opt/cni/bin/; fi + +COPY manifest.json tmpfiles.template config.json.template service.template /exports/ + +COPY set_mounts.sh / +COPY run.sh /usr/bin/ + +CMD ["/usr/bin/run.sh"] + +LABEL \ + com.redhat.component="cri-o-docker" \ + io.k8s.description="CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry." 
\ + maintainer="Jhon Honce " \ + name="openshift3/cri-o" \ + License="GPLv2+" \ + io.k8s.display-name="CRI-O" \ + summary="OCI-based implementation of Kubernetes Container Runtime Interface" \ + release="0.13.0.0" \ + version="v3.8.0" \ + architecture="x86_64" \ + usage="atomic install --system --system-package=no crio && systemctl start crio" \ + vendor="Red Hat" \ + io.openshift.tags="cri-o system rhel7" \ + atomic.type="system" diff --git a/contrib/system_containers/rhel/config.json.template b/contrib/system_containers/rhel/config.json.template new file mode 100644 index 00000000..a5eb001e --- /dev/null +++ b/contrib/system_containers/rhel/config.json.template @@ -0,0 +1,422 @@ +{ + "ociVersion": "1.0.0", + "platform": { + "arch": "amd64", + "os": "linux" + }, + "process": { + "args": [ + "/usr/bin/run.sh" + ], + "capabilities": { + "ambient": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "bounding": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "effective": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "inheritable": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + 
"CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ], + "permitted": [ + "CAP_CHOWN", + "CAP_FOWNER", + "CAP_FSETID", + "CAP_KILL", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETPCAP", + "CAP_LINUX_IMMUTABLE", + "CAP_NET_BIND_SERVICE", + "CAP_NET_BROADCAST", + "CAP_NET_ADMIN", + "CAP_NET_RAW", + "CAP_IPC_LOCK", + "CAP_IPC_OWNER", + "CAP_SYS_MODULE", + "CAP_SYS_RAWIO", + "CAP_SYS_CHROOT", + "CAP_SYS_PTRACE", + "CAP_SYS_PACCT", + "CAP_SYS_ADMIN", + "CAP_SYS_BOOT", + "CAP_SYS_NICE", + "CAP_SYS_RESOURCE", + "CAP_SYS_TIME", + "CAP_SYS_TTY_CONFIG", + "CAP_MKNOD", + "CAP_LEASE", + "CAP_AUDIT_WRITE", + "CAP_AUDIT_CONTROL", + "CAP_SETFCAP", + "CAP_DAC_OVERRIDE", + "CAP_MAC_OVERRIDE", + "CAP_DAC_READ_SEARCH", + "CAP_MAC_ADMIN", + "CAP_SYSLOG", + "CAP_WAKE_ALARM", + "CAP_BLOCK_SUSPEND" + ] + }, + "selinuxLabel": "system_u:system_r:container_runtime_t:s0", + "cwd": "/", + "env": [ + "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/local/go/bin:/root/go/bin", + "TERM=xterm", + "LOG_LEVEL=$LOG_LEVEL", + "NAME=$NAME" + ], + "noNewPrivileges": false, + "terminal": false, + "user": { + "gid": 0, + "uid": 0 + } + }, + "root": { + "path": "rootfs", + "readonly": true + }, + "hooks": {}, + "linux": { + "namespaces": [{ + "type": "mount" + }], + "resources": { + "devices": [{ + "access": "rwm", + "allow": true + }] + }, + "rootfsPropagation": "private" + }, + "mounts": [{ + "destination": "/tmp", + "options": [ + "private", + "bind", + "rw", + "mode=755" + ], + "source": "/tmp", + "type": "bind" + }, + { + "destination": "/etc", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/etc", + "type": "bind" + }, + { + "destination": "/lib/modules", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/lib/modules", + "type": "bind" + }, + { + "destination": "/root", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/root", + "type": "bind" + }, + { + "destination": "/home", + "options": [ + "rbind", + "rprivate", + "rw", + "mode=755" + ], + "source": "/home", + "type": "bind" + }, + { + "destination": "/mnt", + "options": [ + "rbind", + "rw", + "rprivate", + "mode=755" + ], + "source": "/mnt", + "type": "bind" + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}", + "destination": "/run", + "options": [ + "rshared", + "rbind", + "rw", + "mode=755" + ] + }, + { + "type": "bind", + "source": "${RUN_DIRECTORY}/systemd", + "destination": "/run/systemd", + "options": [ + "rslave", + "bind", + "rw", + "mode=755" + ] + }, + { + "destination": "/var/log", + "options": [ + "rbind", + "rslave", + "rw" + ], + "source": "/var/log", + "type": "bind" + }, + { + "destination": "/var/lib", + "options": [ + "rbind", + "rprivate", + "rw" + ], + "source": "${STATE_DIRECTORY}", + "type": "bind" + }, + { + "destination": "/var/lib/containers/storage", + "options": [ + "rbind", + "rshared", + "rw" + ], + "source": "${VAR_LIB_CONTAINERS_STORAGE}", + "type": "bind" + }, + { + "destination": "/var/lib/origin", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_ORIGIN}", + "type": "bind" + }, + { + "destination": "/var/lib/kubelet", + "options": [ + "rshared", + "bind", + "rw" + ], + "source": "${VAR_LIB_KUBE}", + "type": "bind" + }, + { + "destination": "/opt/cni", + "options": [ + "rbind", + "rprivate", + "ro", + "mode=755" + ], + "source": 
"${OPT_CNI}", + "type": "bind" + }, + { + "destination": "/dev", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/dev", + "type": "bind" + }, + { + "destination": "/sys", + "options": [ + "rprivate", + "rbind", + "rw", + "mode=755" + ], + "source": "/sys", + "type": "bind" + }, + { + "destination": "/proc", + "options": [ + "rbind", + "rw", + "mode=755" + ], + "source": "/proc", + "type": "proc" + } + ] +} diff --git a/contrib/system_containers/rhel/help.md b/contrib/system_containers/rhel/help.md new file mode 100644 index 00000000..e46702e7 --- /dev/null +++ b/contrib/system_containers/rhel/help.md @@ -0,0 +1,37 @@ +% CRI-O (1) Container Image Pages +% Jhon Honce +% September 7, 2017 + +# NAME +cri-o - OCI-based implementation of Kubernetes Container Runtime Interface + +# DESCRIPTION +CRI-O is an implementation of the Kubernetes CRI. It is a lightweight, OCI-compliant runtime that is native to kubernetes. CRI-O supports OCI container images and can pull from any container registry. + +You can find more information on the CRI-O project at + +# USAGE +Pull from local docker and install system container: + +``` +# atomic pull --storage ostree docker:openshift3/cri-o:latest +# atomic install --system --system-package=no --name cri-o openshift3/cri-o +``` + +Start and enable as a systemd service: +``` +# systemctl enable --now cri-o +``` + +Stopping the service +``` +# systemctl stop cri-o +``` + +Removing the container +``` +# atomic uninstall cri-o +``` + +# SEE ALSO +man systemd(1) diff --git a/contrib/system_containers/rhel/manifest.json b/contrib/system_containers/rhel/manifest.json new file mode 100644 index 00000000..727abf9e --- /dev/null +++ b/contrib/system_containers/rhel/manifest.json @@ -0,0 +1,10 @@ +{ + "version": "1.0", + "defaultValues": { + "LOG_LEVEL": "info", + "OPT_CNI": "/opt/cni", + "VAR_LIB_CONTAINERS_STORAGE": "/var/lib/containers/storage", + "VAR_LIB_ORIGIN": "/var/lib/origin", + "VAR_LIB_KUBE": "/var/lib/kubelet" + } +} diff --git a/contrib/system_containers/rhel/run.sh b/contrib/system_containers/rhel/run.sh new file mode 100755 index 00000000..5621a7ba --- /dev/null +++ b/contrib/system_containers/rhel/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +# Ensure that new process maintain this SELinux label +PID=$$ +LABEL=`tr -d '\000' < /proc/$PID/attr/current` +printf %s $LABEL > /proc/self/attr/exec + +exec /usr/bin/crio --log-level=$LOG_LEVEL diff --git a/contrib/system_containers/rhel/service.template b/contrib/system_containers/rhel/service.template new file mode 100644 index 00000000..4c08b39d --- /dev/null +++ b/contrib/system_containers/rhel/service.template @@ -0,0 +1,20 @@ +[Unit] +Description=crio daemon +After=network.target + +[Service] +Type=notify +ExecStartPre=/bin/sh $DESTDIR/rootfs/set_mounts.sh +ExecStart=$EXEC_START +ExecStop=$EXEC_STOP +Restart=on-failure +WorkingDirectory=$DESTDIR +RuntimeDirectory=${NAME} +TasksMax=infinity +LimitNOFILE=1048576 +LimitNPROC=1048576 +LimitCORE=infinity +TimeoutStartSec=0 + +[Install] +WantedBy=multi-user.target diff --git a/contrib/system_containers/rhel/set_mounts.sh b/contrib/system_containers/rhel/set_mounts.sh new file mode 100755 index 00000000..c1f0c050 --- /dev/null +++ b/contrib/system_containers/rhel/set_mounts.sh @@ -0,0 +1,7 @@ +#!/bin/sh + +findmnt /var/lib/containers/storage > /dev/null || mount --rbind --make-shared /var/lib/containers/storage /var/lib/containers/storage +findmnt /var/lib/origin > /dev/null || mount --bind --make-shared /var/lib/origin /var/lib/origin +findmnt 
/var/lib/kubelet > /dev/null || mount --bind --make-shared /var/lib/kubelet /var/lib/kubelet +mount --make-shared /run +findmnt /run/systemd > /dev/null || mount --bind --make-rslave /run/systemd /run/systemd diff --git a/contrib/system_containers/rhel/tmpfiles.template b/contrib/system_containers/rhel/tmpfiles.template new file mode 100644 index 00000000..015919c4 --- /dev/null +++ b/contrib/system_containers/rhel/tmpfiles.template @@ -0,0 +1,5 @@ +d ${RUN_DIRECTORY}/${NAME} - - - - - +d /etc/crio - - - - - +Z /etc/crio - - - - - +d ${STATE_DIRECTORY}/origin - - - - - +d ${STATE_DIRECTORY}/kubelet - - - - - From 7d2bde110abe94242fbe2eed93cbfa395ad76674 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 8 Dec 2017 21:26:37 +0100 Subject: [PATCH 17/59] contrib: test: integration: enable more e2e kube tests Signed-off-by: Antonio Murdaca --- contrib/test/integration/build/kubernetes.yml | 2 ++ contrib/test/integration/e2e.yml | 3 ++- contrib/test/integration/main.yml | 4 ++-- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/contrib/test/integration/build/kubernetes.yml b/contrib/test/integration/build/kubernetes.yml index 3a18321c..1aa8db8d 100644 --- a/contrib/test/integration/build/kubernetes.yml +++ b/contrib/test/integration/build/kubernetes.yml @@ -47,6 +47,8 @@ export API_HOST={{ ansible_eth0.ipv4.address }} export API_HOST_IP={{ ansible_eth0.ipv4.address }} export KUBE_ENABLE_CLUSTER_DNS=true + export ENABLE_HOSTPATH_PROVISIONER=true + export KUBE_ENABLE_CLUSTER_DASHBOARD=true ./hack/local-up-cluster.sh mode: "u=rwx,g=rwx,o=x" diff --git a/contrib/test/integration/e2e.yml b/contrib/test/integration/e2e.yml index 5c4d656e..4dd87950 100644 --- a/contrib/test/integration/e2e.yml +++ b/contrib/test/integration/e2e.yml @@ -37,13 +37,14 @@ path: "{{ artifacts }}" state: directory +# TODO remove the last test skipped once https://github.com/kubernetes-incubator/cri-o/pull/1217 is merged - name: Buffer the e2e testing command to workaround Ansible YAML folding "feature" set_fact: e2e_shell_cmd: > /usr/bin/go run hack/e2e.go --test --test_args="-host=https://{{ ansible_default_ipv4.address }}:6443 - --ginkgo.focus=\[Conformance\] + --ginkgo.skip=\[Slow\]|\[Serial\]|\[Disruptive\]|\[Flaky\]|\[Feature:.+\]|PersistentVolumes|\[HPA\]|should.support.building.a.client.with.a.CSR|should.support.inline.execution.and.attach --report-dir={{ artifacts }}" &> {{ artifacts }}/e2e.log # Fix vim syntax hilighting: " diff --git a/contrib/test/integration/main.yml b/contrib/test/integration/main.yml index e5c22c03..cbbcd12e 100644 --- a/contrib/test/integration/main.yml +++ b/contrib/test/integration/main.yml @@ -96,8 +96,8 @@ vars: force_clone: True # master as of 12/11/2017 - k8s_git_version: "a48f11c2257d84b0bec89864025508b0ef626b4f" - k8s_github_fork: "kubernetes" + k8s_git_version: "master-nfs-fix" + k8s_github_fork: "runcom" crio_socket: "/var/run/crio/crio.sock" - name: run k8s e2e tests include: e2e.yml From 455245e65bd6375c57d3a475b583789ffb80af94 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 15 Dec 2017 14:33:44 +0100 Subject: [PATCH 18/59] bump runc to c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f Signed-off-by: Antonio Murdaca --- Dockerfile | 2 +- contrib/test/integration/build/runc.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6f170b26..c1f02853 100644 --- a/Dockerfile +++ b/Dockerfile @@ -56,7 +56,7 @@ RUN mkdir -p /usr/src/criu \ && rm -rf /usr/src/criu # Install runc -ENV RUNC_COMMIT 
84a082bfef6f932de921437815355186db37aeb1 +ENV RUNC_COMMIT c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f RUN set -x \ && export GOPATH="$(mktemp -d)" \ && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ diff --git a/contrib/test/integration/build/runc.yml b/contrib/test/integration/build/runc.yml index 7bb0491d..f3221f4a 100644 --- a/contrib/test/integration/build/runc.yml +++ b/contrib/test/integration/build/runc.yml @@ -4,7 +4,7 @@ git: repo: "https://github.com/opencontainers/runc.git" dest: "{{ ansible_env.GOPATH }}/src/github.com/opencontainers/runc" - version: "84a082bfef6f932de921437815355186db37aeb1" + version: "c6e4a1ebeb1a72b529c6f1b6ee2b1ae5b868b14f" - name: build runc make: From ecc572e7cfff83f3a329acbe210112d23e916b47 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 15 Dec 2017 15:31:55 +0100 Subject: [PATCH 19/59] lib,oci: drop stateLock when possible Should fix a possible deadlock in, at least, ListPodSandbox. There seems to be no reason to hold stateLock when doing operations on the memory_store for containers and sandboxes. Signed-off-by: Antonio Murdaca --- lib/container_server.go | 43 +++++++++---------------------------- lib/sandbox/memory_store.go | 3 ++- oci/memory_store.go | 3 ++- 3 files changed, 14 insertions(+), 35 deletions(-) diff --git a/lib/container_server.go b/lib/container_server.go index 7c9c987d..9a4704b7 100644 --- a/lib/container_server.go +++ b/lib/container_server.go @@ -624,8 +624,6 @@ type containerServerState struct { // AddContainer adds a container to the container state store func (c *ContainerServer) AddContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sandbox := c.state.sandboxes.Get(ctr.Sandbox()) sandbox.AddContainer(ctr) c.state.containers.Add(ctr.ID(), ctr) @@ -633,37 +631,26 @@ func (c *ContainerServer) AddContainer(ctr *oci.Container) { // AddInfraContainer adds a container to the container state store func (c *ContainerServer) AddInfraContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.infraContainers.Add(ctr.ID(), ctr) } // GetContainer returns a container by its ID func (c *ContainerServer) GetContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.containers.Get(id) } // GetInfraContainer returns a container by its ID func (c *ContainerServer) GetInfraContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.infraContainers.Get(id) } // HasContainer checks if a container exists in the state func (c *ContainerServer) HasContainer(id string) bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - ctr := c.state.containers.Get(id) - return ctr != nil + return c.state.containers.Get(id) != nil } // RemoveContainer removes a container from the container state store func (c *ContainerServer) RemoveContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sbID := ctr.Sandbox() sb := c.state.sandboxes.Get(sbID) sb.RemoveContainer(ctr) @@ -672,15 +659,11 @@ func (c *ContainerServer) RemoveContainer(ctr *oci.Container) { // RemoveInfraContainer removes a container from the container state store func (c *ContainerServer) RemoveInfraContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.infraContainers.Delete(ctr.ID()) } // listContainers returns a list of all containers stored by the server state func (c *ContainerServer) listContainers() []*oci.Container { - 
c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.containers.List() } @@ -704,43 +687,36 @@ func (c *ContainerServer) ListContainers(filters ...func(*oci.Container) bool) ( // AddSandbox adds a sandbox to the sandbox state store func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.sandboxes.Add(sb.ID(), sb) + + c.stateLock.Lock() c.state.processLevels[selinux.NewContext(sb.ProcessLabel())["level"]]++ + c.stateLock.Unlock() } // GetSandbox returns a sandbox by its ID func (c *ContainerServer) GetSandbox(id string) *sandbox.Sandbox { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.sandboxes.Get(id) } // GetSandboxContainer returns a sandbox's infra container func (c *ContainerServer) GetSandboxContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() sb := c.state.sandboxes.Get(id) return sb.InfraContainer() } // HasSandbox checks if a sandbox exists in the state func (c *ContainerServer) HasSandbox(id string) bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - sb := c.state.sandboxes.Get(id) - return sb != nil + return c.state.sandboxes.Get(id) != nil } // RemoveSandbox removes a sandbox from the state store func (c *ContainerServer) RemoveSandbox(id string) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sb := c.state.sandboxes.Get(id) processLabel := sb.ProcessLabel() - c.state.sandboxes.Delete(id) level := selinux.NewContext(processLabel)["level"] + + c.stateLock.Lock() pl, ok := c.state.processLevels[level] if ok { c.state.processLevels[level] = pl - 1 @@ -749,12 +725,13 @@ func (c *ContainerServer) RemoveSandbox(id string) { delete(c.state.processLevels, level) } } + c.stateLock.Unlock() + + c.state.sandboxes.Delete(id) } // ListSandboxes lists all sandboxes in the state store func (c *ContainerServer) ListSandboxes() []*sandbox.Sandbox { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.sandboxes.List() } diff --git a/lib/sandbox/memory_store.go b/lib/sandbox/memory_store.go index 6bf0cd84..17533bf7 100644 --- a/lib/sandbox/memory_store.go +++ b/lib/sandbox/memory_store.go @@ -25,8 +25,9 @@ func (c *memoryStore) Add(id string, cont *Sandbox) { // Get returns a sandbox from the store by id. func (c *memoryStore) Get(id string) *Sandbox { + var res *Sandbox c.RLock() - res := c.s[id] + res = c.s[id] c.RUnlock() return res } diff --git a/oci/memory_store.go b/oci/memory_store.go index 6223ce7f..3f0cac55 100644 --- a/oci/memory_store.go +++ b/oci/memory_store.go @@ -25,8 +25,9 @@ func (c *memoryStore) Add(id string, cont *Container) { // Get returns a container from the store by id. func (c *memoryStore) Get(id string) *Container { + var res *Container c.RLock() - res := c.s[id] + res = c.s[id] c.RUnlock() return res } From 814c6ab0913d827543696b366048056a31d9529c Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 15 Dec 2017 15:36:05 +0100 Subject: [PATCH 20/59] version: bump v1.9.0 Signed-off-by: Antonio Murdaca --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 7eaa12c4..2fbe2391 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. 
-const Version = "1.9.0-dev" +const Version = "1.9.0" From 9df76e0e7d0ac4886a7bdc0a7b717ac859fc0f69 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 15 Dec 2017 15:36:19 +0100 Subject: [PATCH 21/59] version: bump v1.9.1-dev Signed-off-by: Antonio Murdaca --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 2fbe2391..5bffa4d7 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "1.9.0" +const Version = "1.9.1-dev" From f67662e684bd5a12f7dab1512f8446363280e0dc Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 15 Dec 2017 15:31:55 +0100 Subject: [PATCH 22/59] lib,oci: drop stateLock when possible Should fix a possible deadlock in, at least, ListPodSandbox. There seems to be no reason to hold stateLock when doing operations on the memory_store for containers and sandboxes. Signed-off-by: Antonio Murdaca --- lib/container_server.go | 43 +++++++++---------------------------- lib/sandbox/memory_store.go | 3 ++- oci/memory_store.go | 3 ++- 3 files changed, 14 insertions(+), 35 deletions(-) diff --git a/lib/container_server.go b/lib/container_server.go index 7c9c987d..9a4704b7 100644 --- a/lib/container_server.go +++ b/lib/container_server.go @@ -624,8 +624,6 @@ type containerServerState struct { // AddContainer adds a container to the container state store func (c *ContainerServer) AddContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sandbox := c.state.sandboxes.Get(ctr.Sandbox()) sandbox.AddContainer(ctr) c.state.containers.Add(ctr.ID(), ctr) @@ -633,37 +631,26 @@ func (c *ContainerServer) AddContainer(ctr *oci.Container) { // AddInfraContainer adds a container to the container state store func (c *ContainerServer) AddInfraContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.infraContainers.Add(ctr.ID(), ctr) } // GetContainer returns a container by its ID func (c *ContainerServer) GetContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.containers.Get(id) } // GetInfraContainer returns a container by its ID func (c *ContainerServer) GetInfraContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.infraContainers.Get(id) } // HasContainer checks if a container exists in the state func (c *ContainerServer) HasContainer(id string) bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - ctr := c.state.containers.Get(id) - return ctr != nil + return c.state.containers.Get(id) != nil } // RemoveContainer removes a container from the container state store func (c *ContainerServer) RemoveContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sbID := ctr.Sandbox() sb := c.state.sandboxes.Get(sbID) sb.RemoveContainer(ctr) @@ -672,15 +659,11 @@ func (c *ContainerServer) RemoveContainer(ctr *oci.Container) { // RemoveInfraContainer removes a container from the container state store func (c *ContainerServer) RemoveInfraContainer(ctr *oci.Container) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.infraContainers.Delete(ctr.ID()) } // listContainers returns a list of all containers stored by the server state func (c *ContainerServer) listContainers() []*oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.containers.List() } @@ -704,43 +687,36 @@ func (c *ContainerServer) 
ListContainers(filters ...func(*oci.Container) bool) ( // AddSandbox adds a sandbox to the sandbox state store func (c *ContainerServer) AddSandbox(sb *sandbox.Sandbox) { - c.stateLock.Lock() - defer c.stateLock.Unlock() c.state.sandboxes.Add(sb.ID(), sb) + + c.stateLock.Lock() c.state.processLevels[selinux.NewContext(sb.ProcessLabel())["level"]]++ + c.stateLock.Unlock() } // GetSandbox returns a sandbox by its ID func (c *ContainerServer) GetSandbox(id string) *sandbox.Sandbox { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.sandboxes.Get(id) } // GetSandboxContainer returns a sandbox's infra container func (c *ContainerServer) GetSandboxContainer(id string) *oci.Container { - c.stateLock.Lock() - defer c.stateLock.Unlock() sb := c.state.sandboxes.Get(id) return sb.InfraContainer() } // HasSandbox checks if a sandbox exists in the state func (c *ContainerServer) HasSandbox(id string) bool { - c.stateLock.Lock() - defer c.stateLock.Unlock() - sb := c.state.sandboxes.Get(id) - return sb != nil + return c.state.sandboxes.Get(id) != nil } // RemoveSandbox removes a sandbox from the state store func (c *ContainerServer) RemoveSandbox(id string) { - c.stateLock.Lock() - defer c.stateLock.Unlock() sb := c.state.sandboxes.Get(id) processLabel := sb.ProcessLabel() - c.state.sandboxes.Delete(id) level := selinux.NewContext(processLabel)["level"] + + c.stateLock.Lock() pl, ok := c.state.processLevels[level] if ok { c.state.processLevels[level] = pl - 1 @@ -749,12 +725,13 @@ func (c *ContainerServer) RemoveSandbox(id string) { delete(c.state.processLevels, level) } } + c.stateLock.Unlock() + + c.state.sandboxes.Delete(id) } // ListSandboxes lists all sandboxes in the state store func (c *ContainerServer) ListSandboxes() []*sandbox.Sandbox { - c.stateLock.Lock() - defer c.stateLock.Unlock() return c.state.sandboxes.List() } diff --git a/lib/sandbox/memory_store.go b/lib/sandbox/memory_store.go index 6bf0cd84..17533bf7 100644 --- a/lib/sandbox/memory_store.go +++ b/lib/sandbox/memory_store.go @@ -25,8 +25,9 @@ func (c *memoryStore) Add(id string, cont *Sandbox) { // Get returns a sandbox from the store by id. func (c *memoryStore) Get(id string) *Sandbox { + var res *Sandbox c.RLock() - res := c.s[id] + res = c.s[id] c.RUnlock() return res } diff --git a/oci/memory_store.go b/oci/memory_store.go index 6223ce7f..3f0cac55 100644 --- a/oci/memory_store.go +++ b/oci/memory_store.go @@ -25,8 +25,9 @@ func (c *memoryStore) Add(id string, cont *Container) { // Get returns a container from the store by id. func (c *memoryStore) Get(id string) *Container { + var res *Container c.RLock() - res := c.s[id] + res = c.s[id] c.RUnlock() return res } From a85f3127d8291e001c788de1d273c3a888061cb9 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Mon, 11 Dec 2017 15:38:31 -0500 Subject: [PATCH 23/59] Improve error messages on missing runtime Also stat.h is included twice, Add more info on log file name and error when failing to open. 
Signed-off-by: Daniel J Walsh --- conmon/conmon.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/conmon/conmon.c b/conmon/conmon.c index b00cb0cd..477b98bf 100644 --- a/conmon/conmon.c +++ b/conmon/conmon.c @@ -12,7 +12,6 @@ #include #include #include -#include #include #include #include @@ -350,7 +349,7 @@ static int write_k8s_log(int fd, stdpipe_t pipe, const char *buf, ssize_t buflen /* Open the log path file again */ log_fd = open(opt_log_path, O_WRONLY | O_APPEND | O_CREAT | O_CLOEXEC, 0600); if (log_fd < 0) - pexit("Failed to open log file"); + pexit("Failed to open log file %s: %s", opt_log_path, strerror(errno)); fd = log_fd; } @@ -1121,6 +1120,8 @@ int main(int argc, char *argv[]) if (opt_runtime_path == NULL) nexit("Runtime path not provided. Use --runtime"); + if (access(opt_runtime_path, X_OK) < 0) + pexit("Runtime path %s is not valid: %s", opt_runtime_path, strerror(errno)); if (!opt_exec && opt_exit_dir == NULL) nexit("Container exit directory not provided. Use --exit-dir"); From de0be63495daeee2d3847562bfeafcd1132e26c0 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Wed, 20 Dec 2017 18:23:37 +0100 Subject: [PATCH 24/59] container_create: set cpuset cpus|mems Signed-off-by: Antonio Murdaca --- server/container_create.go | 29 +++++++---------------------- test/ctr.bats | 28 ++++++++++++++++++++++++++++ test/testdata/container_redis.json | 4 +++- 3 files changed, 38 insertions(+), 23 deletions(-) diff --git a/server/container_create.go b/server/container_create.go index 080a1af7..56a94c2b 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -785,28 +785,13 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if linux != nil { resources := linux.GetResources() if resources != nil { - cpuPeriod := resources.CpuPeriod - if cpuPeriod != 0 { - specgen.SetLinuxResourcesCPUPeriod(uint64(cpuPeriod)) - } - - cpuQuota := resources.CpuQuota - if cpuQuota != 0 { - specgen.SetLinuxResourcesCPUQuota(cpuQuota) - } - - cpuShares := resources.CpuShares - if cpuShares != 0 { - specgen.SetLinuxResourcesCPUShares(uint64(cpuShares)) - } - - memoryLimit := resources.MemoryLimitInBytes - if memoryLimit != 0 { - specgen.SetLinuxResourcesMemoryLimit(memoryLimit) - } - - oomScoreAdj := resources.OomScoreAdj - specgen.SetProcessOOMScoreAdj(int(oomScoreAdj)) + specgen.SetLinuxResourcesCPUPeriod(uint64(resources.GetCpuPeriod())) + specgen.SetLinuxResourcesCPUQuota(resources.GetCpuQuota()) + specgen.SetLinuxResourcesCPUShares(uint64(resources.GetCpuShares())) + specgen.SetLinuxResourcesMemoryLimit(resources.GetMemoryLimitInBytes()) + specgen.SetProcessOOMScoreAdj(int(resources.GetOomScoreAdj())) + specgen.SetLinuxResourcesCPUCpus(resources.GetCpusetCpus()) + specgen.SetLinuxResourcesCPUMems(resources.GetCpusetMems()) } var cgPath string diff --git a/test/ctr.bats b/test/ctr.bats index 1bf4f8b9..5f37c708 100644 --- a/test/ctr.bats +++ b/test/ctr.bats @@ -1061,3 +1061,31 @@ function teardown() { cleanup_pods stop_crio } + +@test "ctr resources" { + start_crio + run crictl runs "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + pod_id="$output" + run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json + echo "$output" + [ "$status" -eq 0 ] + ctr_id="$output" + run crictl start "$ctr_id" + echo "$output" + [ "$status" -eq 0 ] + + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpuset/cpuset.cpus" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "0-1" 
]] + run crictl exec --sync "$ctr_id" sh -c "cat /sys/fs/cgroup/cpuset/cpuset.mems" + echo "$output" + [ "$status" -eq 0 ] + [[ "$output" =~ "0" ]] + + cleanup_ctrs + cleanup_pods + stop_crio +} diff --git a/test/testdata/container_redis.json b/test/testdata/container_redis.json index 20f1cbe5..39a0865b 100644 --- a/test/testdata/container_redis.json +++ b/test/testdata/container_redis.json @@ -49,7 +49,9 @@ "cpu_period": 10000, "cpu_quota": 20000, "cpu_shares": 512, - "oom_score_adj": 30 + "oom_score_adj": 30, + "cpuset_cpus": "0-1", + "cpuset_mems": "0" }, "security_context": { "capabilities": { From 8b1fefad7179992d2a0c31b6b0b39a6e2e0ccf4f Mon Sep 17 00:00:00 2001 From: Jianyong Wu Date: Thu, 28 Dec 2017 16:45:50 +0800 Subject: [PATCH 25/59] Add bsdmainutils tool to Dockerfile to enable integration test on arm64 Build image for integration test on arm64 will fail for lack of hexdump. Add bsdmainutils tool to eliminate that failure and let build image succussfully Signed-off-by: Jianyong Wu --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index c1f02853..1fb5e569 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,6 +38,7 @@ RUN apt-get update && apt-get install -y \ netcat \ socat \ --no-install-recommends \ + bsdmainutils \ && apt-get clean # install bats From 88b13dfddf661a23c206d24a16abdb81d1fc2e86 Mon Sep 17 00:00:00 2001 From: Haoran Wang Date: Fri, 29 Dec 2017 14:25:05 +0800 Subject: [PATCH 26/59] fix log Signed-off-by: Haoran Wang --- cmd/crio/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/crio/main.go b/cmd/crio/main.go index f2cd32c5..a058f296 100644 --- a/cmd/crio/main.go +++ b/cmd/crio/main.go @@ -509,7 +509,7 @@ func main() { if graceful && strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") { err = nil } else { - logrus.Errorf("Failed to serve grpc grpc request: %v", err) + logrus.Errorf("Failed to serve grpc request: %v", err) } } }() From a28eb8374e6d0022e8323eb643d7784b9937a146 Mon Sep 17 00:00:00 2001 From: Aaron Crickenberger Date: Wed, 20 Dec 2017 14:11:34 -0500 Subject: [PATCH 27/59] Update code-of-conduct.md Refer to kubernetes/community as authoritative source for code of conduct Signed-off-by: Aaron Crickenberger --- code-of-conduct.md | 56 ++-------------------------------------------- 1 file changed, 2 insertions(+), 54 deletions(-) diff --git a/code-of-conduct.md b/code-of-conduct.md index 215ce7ac..0d15c00c 100644 --- a/code-of-conduct.md +++ b/code-of-conduct.md @@ -1,55 +1,3 @@ -## Kubernetes Community Code of Conduct +# Kubernetes Community Code of Conduct -### Contributor Code of Conduct - -As contributors and maintainers of this project, and in the interest of fostering -an open and welcoming community, we pledge to respect all people who contribute -through reporting issues, posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. - -We are committed to making participation in this project a harassment-free experience for -everyone, regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, body size, race, ethnicity, age, -religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery. -* Personal attacks. -* Trolling or insulting/derogatory comments. -* Public or private harassment. 
-* Publishing other's private information, such as physical or electronic addresses, - without explicit permission. -* Other unethical or unprofessional conduct. - -Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions that are not -aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers -commit themselves to fairly and consistently applying these principles to every aspect -of managing this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a Kubernetes maintainer, Sarah Novotny , and/or Dan Kohn . - -This Code of Conduct is adapted from the Contributor Covenant -(http://contributor-covenant.org), version 1.2.0, available at -http://contributor-covenant.org/version/1/2/0/ - -### Kubernetes Events Code of Conduct - -Kubernetes events are working conferences intended for professional networking and collaboration in the -Kubernetes community. Attendees are expected to behave according to professional standards and in accordance -with their employer's policies on appropriate workplace behavior. - -While at Kubernetes events or related social networking opportunities, attendees should not engage in -discriminatory or offensive speech or actions regarding gender, sexuality, race, or religion. Speakers should -be especially aware of these concerns. - -The Kubernetes team does not condone any statements by speakers contrary to these standards. The Kubernetes -team reserves the right to deny entrance and/or eject from an event (without refund) any individual found to -be engaging in discriminatory or offensive speech or actions. - -Please bring any concerns to the immediate attention of the Kubernetes event staff. +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) From 3c1c6d047ee9825f025e764f69c366e9083292a5 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Thu, 4 Jan 2018 10:52:33 -0500 Subject: [PATCH 28/59] Add -i flag to speed up compilation of cri-o packages Instead of compiling all of the *.go files each time, the -i flag will cause them to be only compiled if they changed. This will make developers much happier. 
Signed-off-by: Daniel J Walsh --- Makefile | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index d8508822..62770df7 100644 --- a/Makefile +++ b/Makefile @@ -73,16 +73,16 @@ pause: $(MAKE) -C $@ test/bin2img/bin2img: .gopathok $(wildcard test/bin2img/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/bin2img test/copyimg/copyimg: .gopathok $(wildcard test/copyimg/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/copyimg test/checkseccomp/checkseccomp: .gopathok $(wildcard test/checkseccomp/*.go) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o $@ $(PROJECT)/test/checkseccomp crio: .gopathok $(shell hack/find-godeps.sh $(GOPKGDIR) cmd/crio $(PROJECT)) - $(GO) build $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crio + $(GO) build -i $(LDFLAGS) -tags "$(BUILDTAGS) containers_image_ostree_stub" -o bin/$@ $(PROJECT)/cmd/crio crio.conf: crio ./bin/crio --config="" config --default > crio.conf From 3881f375b9ac87d9e5be636293d8f44d7d693b94 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 4 Jan 2018 20:14:29 +0100 Subject: [PATCH 29/59] syscontainer, rhel: read env variables from files Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/rhel/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/system_containers/rhel/run.sh b/contrib/system_containers/rhel/run.sh index 5621a7ba..7f34fd42 100755 --- a/contrib/system_containers/rhel/run.sh +++ b/contrib/system_containers/rhel/run.sh @@ -5,4 +5,7 @@ PID=$$ LABEL=`tr -d '\000' < /proc/$PID/attr/current` printf %s $LABEL > /proc/self/attr/exec +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + exec /usr/bin/crio --log-level=$LOG_LEVEL From 1f75ec82e1f79c5b4adc6a0b830782ac61226a92 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 4 Jan 2018 20:14:39 +0100 Subject: [PATCH 30/59] syscontainer, fedora: read env variables from files Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/fedora/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/system_containers/fedora/run.sh b/contrib/system_containers/fedora/run.sh index 5621a7ba..7f34fd42 100755 --- a/contrib/system_containers/fedora/run.sh +++ b/contrib/system_containers/fedora/run.sh @@ -5,4 +5,7 @@ PID=$$ LABEL=`tr -d '\000' < /proc/$PID/attr/current` printf %s $LABEL > /proc/self/attr/exec +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + exec /usr/bin/crio --log-level=$LOG_LEVEL From b5167d4e8ffc4e3351fc0c9db00f74cfc8c470ed Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 4 Jan 2018 20:14:52 +0100 Subject: [PATCH 31/59] syscontainer, centos: read env variables from files Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/centos/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/system_containers/centos/run.sh b/contrib/system_containers/centos/run.sh index 
5621a7ba..7f34fd42 100755 --- a/contrib/system_containers/centos/run.sh +++ b/contrib/system_containers/centos/run.sh @@ -5,4 +5,7 @@ PID=$$ LABEL=`tr -d '\000' < /proc/$PID/attr/current` printf %s $LABEL > /proc/self/attr/exec +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + exec /usr/bin/crio --log-level=$LOG_LEVEL From b1b380d67b1fb1668446599181d7115aca3902a0 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 9 Jan 2018 18:00:43 +0100 Subject: [PATCH 32/59] syscontainer, centos: create /var/run/crio Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/centos/tmpfiles.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/system_containers/centos/tmpfiles.template b/contrib/system_containers/centos/tmpfiles.template index 015919c4..94472677 100644 --- a/contrib/system_containers/centos/tmpfiles.template +++ b/contrib/system_containers/centos/tmpfiles.template @@ -1,4 +1,4 @@ -d ${RUN_DIRECTORY}/${NAME} - - - - - +d ${RUN_DIRECTORY}/crio - - - - - d /etc/crio - - - - - Z /etc/crio - - - - - d ${STATE_DIRECTORY}/origin - - - - - From 6bb1b7e17d9dce3e730aed26faa197a4680fae3a Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 9 Jan 2018 18:00:53 +0100 Subject: [PATCH 33/59] syscontainer, rhel: create /var/run/crio Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/rhel/tmpfiles.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/system_containers/rhel/tmpfiles.template b/contrib/system_containers/rhel/tmpfiles.template index 015919c4..94472677 100644 --- a/contrib/system_containers/rhel/tmpfiles.template +++ b/contrib/system_containers/rhel/tmpfiles.template @@ -1,4 +1,4 @@ -d ${RUN_DIRECTORY}/${NAME} - - - - - +d ${RUN_DIRECTORY}/crio - - - - - d /etc/crio - - - - - Z /etc/crio - - - - - d ${STATE_DIRECTORY}/origin - - - - - From 2cb22eba497035ebd4d223813b1f94d8039e683f Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 9 Jan 2018 18:01:03 +0100 Subject: [PATCH 34/59] syscontainer, fedora: create /var/run/crio Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/fedora/tmpfiles.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/system_containers/fedora/tmpfiles.template b/contrib/system_containers/fedora/tmpfiles.template index 015919c4..94472677 100644 --- a/contrib/system_containers/fedora/tmpfiles.template +++ b/contrib/system_containers/fedora/tmpfiles.template @@ -1,4 +1,4 @@ -d ${RUN_DIRECTORY}/${NAME} - - - - - +d ${RUN_DIRECTORY}/crio - - - - - d /etc/crio - - - - - Z /etc/crio - - - - - d ${STATE_DIRECTORY}/origin - - - - - From 23d20c9db54147e83f43b124237858ddd9cc55f0 Mon Sep 17 00:00:00 2001 From: Daniel J Walsh Date: Thu, 4 Jan 2018 10:53:55 -0500 Subject: [PATCH 35/59] Allow additional arguments to be passed into hooks If a packager wants to be able to support addititional arguments on his hook this will allow them to setup the configuration with these arguments. For example this would allow a hook developer to add support for a --debug flag to change the level of debugging in his hook. 
In order to complete this task, I had to vendor in the latest github.com://opencontainers/runtime-tools, which caused me to have to fix a Mount and Capability interface calls Signed-off-by: Daniel J Walsh --- hooks.md | 5 +- lib/hooks.go | 1 + server/container_create.go | 117 ++- server/sandbox_run.go | 17 +- vendor.conf | 5 +- .../opencontainers/runtime-tools/README.md | 182 +++- .../runtime-tools/filepath/abs.go | 48 + .../runtime-tools/filepath/ancestor.go | 32 + .../runtime-tools/filepath/clean.go | 74 ++ .../runtime-tools/filepath/doc.go | 6 + .../runtime-tools/filepath/join.go | 9 + .../runtime-tools/filepath/separator.go | 9 + .../runtime-tools/generate/generate.go | 764 +++++++++----- .../runtime-tools/generate/spec.go | 63 ++ .../runtime-tools/specerror/bundle.go | 29 + .../runtime-tools/specerror/config-linux.go | 134 +++ .../runtime-tools/specerror/config-windows.go | 32 + .../runtime-tools/specerror/config.go | 188 ++++ .../runtime-tools/specerror/error.go | 113 +- .../runtime-tools/specerror/runtime-linux.go | 23 + .../runtime-tools/specerror/runtime.go | 179 ++++ .../runtime-tools/validate/validate.go | 534 +++++----- .../gojsonpointer/LICENSE-APACHE-2.0.txt | 202 ++++ .../xeipuuv/gojsonpointer/README.md | 8 + .../xeipuuv/gojsonpointer/pointer.go | 190 ++++ .../gojsonreference/LICENSE-APACHE-2.0.txt | 202 ++++ .../xeipuuv/gojsonreference/README.md | 10 + .../xeipuuv/gojsonreference/reference.go | 141 +++ .../gojsonschema/LICENSE-APACHE-2.0.txt | 202 ++++ .../github.com/xeipuuv/gojsonschema/README.md | 295 ++++++ .../github.com/xeipuuv/gojsonschema/errors.go | 283 +++++ .../xeipuuv/gojsonschema/format_checkers.go | 250 +++++ .../xeipuuv/gojsonschema/internalLog.go | 37 + .../xeipuuv/gojsonschema/jsonContext.go | 72 ++ .../xeipuuv/gojsonschema/jsonLoader.go | 341 ++++++ .../xeipuuv/gojsonschema/locales.go | 286 ++++++ .../github.com/xeipuuv/gojsonschema/result.go | 172 ++++ .../github.com/xeipuuv/gojsonschema/schema.go | 971 ++++++++++++++++++ .../xeipuuv/gojsonschema/schemaPool.go | 103 ++ .../gojsonschema/schemaReferencePool.go | 68 ++ .../xeipuuv/gojsonschema/schemaType.go | 83 ++ .../xeipuuv/gojsonschema/subSchema.go | 227 ++++ .../github.com/xeipuuv/gojsonschema/types.go | 58 ++ .../github.com/xeipuuv/gojsonschema/utils.go | 208 ++++ .../xeipuuv/gojsonschema/validation.go | 844 +++++++++++++++ 45 files changed, 7145 insertions(+), 672 deletions(-) create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/abs.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/clean.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/doc.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/join.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/filepath/separator.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/config.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go create mode 100644 vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt create mode 100644 
vendor/github.com/xeipuuv/gojsonpointer/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonpointer/pointer.go create mode 100644 vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonreference/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonreference/reference.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt create mode 100644 vendor/github.com/xeipuuv/gojsonschema/README.md create mode 100644 vendor/github.com/xeipuuv/gojsonschema/errors.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/format_checkers.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/internalLog.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonContext.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/locales.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/result.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaPool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/schemaType.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/subSchema.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/types.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/utils.go create mode 100644 vendor/github.com/xeipuuv/gojsonschema/validation.go diff --git a/hooks.md b/hooks.md index 809dbdc2..cd3d0a40 100644 --- a/hooks.md +++ b/hooks.md @@ -53,6 +53,7 @@ type HookParams struct { Cmds []string `json:"cmds"` Annotations []string `json:"annotations"` HasBindMounts bool `json:"hasbindmounts"` + Arguments []string `json:"arguments"` } ``` @@ -63,6 +64,7 @@ type HookParams struct { | cmds | List of regular expressions to match the command for running the container. If the command matches a regex, the hook will be run | Optional | | annotations | List of regular expressions to match against the Annotations in the container runtime spec, if an Annotation matches the hook will be run|optional | | hasbindmounts | Tells CRI-O to run the hook if the container has bind mounts from the host into the container | Optional | +| arguments | Additional arguments to append to the hook command when executing it. For example --debug | Optional | ### Example @@ -85,6 +87,7 @@ cat /etc/containers/oci/hooks.d/oci-systemd-hook.json "hasbindmounts": true, "hook": "/usr/libexec/oci/hooks.d/oci-umount", "stages": [ "prestart" ] + "arguments": [ "--debug" ] } ``` -In this example the oci-umount will only be run during the prestart phase if the container has volume/bind mounts from the host into the container. +In this example the oci-umount will only be run during the prestart phase if the container has volume/bind mounts from the host into the container, it will also execute oci-umount with the --debug argument. 
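As a minimal illustration of the new `arguments` behavior (a sketch only, using simplified stand-in types rather than the actual `lib.HookParams` and `rspec.Hook` structs touched by this patch): the hook path becomes argv[0] and the configured arguments are appended after it, while the stage is now passed through the environment (`stage=prestart`) instead of as a positional argument, mirroring the `addOCIHook` change further down in this patch.

```go
package main

import "fmt"

// hookParams is a simplified stand-in for the relevant fields of lib.HookParams.
type hookParams struct {
	Hook      string   // path to the hook binary
	Arguments []string // extra arguments from the "arguments" key
}

func main() {
	h := hookParams{
		Hook:      "/usr/libexec/oci/hooks.d/oci-umount",
		Arguments: []string{"--debug"},
	}
	// The hook path is argv[0], followed by any configured arguments,
	// i.e. append([]string{hook.Hook}, hook.Arguments...) as in addOCIHook.
	args := append([]string{h.Hook}, h.Arguments...)
	fmt.Println(args) // [/usr/libexec/oci/hooks.d/oci-umount --debug]
}
```

So for the example JSON above, the prestart hook would be invoked as `oci-umount --debug` with `stage=prestart` set in its environment.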
diff --git a/lib/hooks.go b/lib/hooks.go index 074ce7cc..fab563f0 100644 --- a/lib/hooks.go +++ b/lib/hooks.go @@ -27,6 +27,7 @@ type HookParams struct { Cmds []string `json:"cmd"` Annotations []string `json:"annotation"` HasBindMounts bool `json:"hasbindmounts"` + Arguments []string `json:"arguments"` } // readHook reads hooks json files, verifies it and returns the json config diff --git a/server/container_create.go b/server/container_create.go index 56a94c2b..6afc412b 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -426,15 +426,18 @@ func buildOCIProcessArgs(containerKubeConfig *pb.ContainerConfig, imageOCIConfig func addOCIHook(specgen *generate.Generator, hook lib.HookParams) error { logrus.Debugf("AddOCIHook", hook) for _, stage := range hook.Stage { + h := rspec.Hook{ + Path: hook.Hook, + Args: append([]string{hook.Hook}, hook.Arguments...), + Env: []string{fmt.Sprintf("stage=%s", stage)}, + } switch stage { case "prestart": - specgen.AddPreStartHook(hook.Hook, []string{hook.Hook, "prestart"}) - + specgen.AddPreStartHook(h) case "poststart": - specgen.AddPostStartHook(hook.Hook, []string{hook.Hook, "poststart"}) - + specgen.AddPostStartHook(h) case "poststop": - specgen.AddPostStopHook(hook.Hook, []string{hook.Hook, "poststop"}) + specgen.AddPostStopHook(h) } } return nil @@ -710,8 +713,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.AddAnnotation(annotations.Volumes, string(volumesJSON)) + mnt := rspec.Mount{ + Destination: "/sys/fs/cgroup", + Type: "cgroup", + Source: "cgroup", + Options: []string{"nosuid", "noexec", "nodev", "relatime", "ro"}, + } // Add cgroup mount so container process can introspect its own limits - specgen.AddCgroupsMount("ro") + specgen.AddMount(mnt) if err := addDevices(sb, containerConfig, &specgen); err != nil { return nil, err @@ -830,14 +839,38 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, // see https://github.com/kubernetes/kubernetes/issues/51980 if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { for _, c := range getOCICapabilitiesList() { - if err := specgen.AddProcessCapability(c); err != nil { + if err := specgen.AddProcessCapabilityAmbient(c); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityBounding(c); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityEffective(c); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityInheritable(c); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityPermitted(c); err != nil { return nil, err } } } if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { for _, c := range getOCICapabilitiesList() { - if err := specgen.DropProcessCapability(c); err != nil { + if err := specgen.DropProcessCapabilityAmbient(c); err != nil { + return nil, err + } + if err := specgen.DropProcessCapabilityBounding(c); err != nil { + return nil, err + } + if err := specgen.DropProcessCapabilityEffective(c); err != nil { + return nil, err + } + if err := specgen.DropProcessCapabilityInheritable(c); err != nil { + return nil, err + } + if err := specgen.DropProcessCapabilityPermitted(c); err != nil { return nil, err } } @@ -848,7 +881,19 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if strings.ToUpper(cap) == "ALL" { continue } - if err := specgen.AddProcessCapability(toCAPPrefixed(cap)); err != nil { + if err := specgen.AddProcessCapabilityAmbient(toCAPPrefixed(cap)); err != 
nil { + return nil, err + } + if err := specgen.AddProcessCapabilityBounding(toCAPPrefixed(cap)); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityEffective(toCAPPrefixed(cap)); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityInheritable(toCAPPrefixed(cap)); err != nil { + return nil, err + } + if err := specgen.AddProcessCapabilityPermitted(toCAPPrefixed(cap)); err != nil { return nil, err } } @@ -857,7 +902,19 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, if strings.ToUpper(cap) == "ALL" { continue } - if err := specgen.DropProcessCapability(toCAPPrefixed(cap)); err != nil { + if err := specgen.DropProcessCapabilityAmbient(toCAPPrefixed(cap)); err != nil { + return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) + } + if err := specgen.DropProcessCapabilityBounding(toCAPPrefixed(cap)); err != nil { + return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) + } + if err := specgen.DropProcessCapabilityEffective(toCAPPrefixed(cap)); err != nil { + return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) + } + if err := specgen.DropProcessCapabilityInheritable(toCAPPrefixed(cap)); err != nil { + return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) + } + if err := specgen.DropProcessCapabilityPermitted(toCAPPrefixed(cap)); err != nil { return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) } } @@ -964,8 +1021,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, specgen.AddAnnotation(annotations.ImageRef, imageRef) specgen.AddAnnotation(annotations.IP, sb.IP()) + mnt = rspec.Mount{ + Type: "bind", + Source: sb.ShmPath(), + Destination: "/etc/shm", + Options: []string{"rw", "bind"}, + } // bind mount the pod shm - specgen.AddBindMount(sb.ShmPath(), "/dev/shm", []string{"rw"}) + specgen.AddMount(mnt) options := []string{"rw"} if readOnlyRootfs { @@ -976,8 +1039,14 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, return nil, err } + mnt = rspec.Mount{ + Type: "bind", + Source: sb.ResolvPath(), + Destination: "/etc/resolv.conf", + Options: append(options, "bind"), + } // bind mount the pod resolver file - specgen.AddBindMount(sb.ResolvPath(), "/etc/resolv.conf", options) + specgen.AddMount(mnt) } if sb.HostnamePath() != "" { @@ -985,12 +1054,24 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, return nil, err } - specgen.AddBindMount(sb.HostnamePath(), "/etc/hostname", options) + mnt = rspec.Mount{ + Type: "bind", + Source: sb.HostnamePath(), + Destination: "/etc/hostname", + Options: append(options, "bind"), + } + specgen.AddMount(mnt) } // Bind mount /etc/hosts for host networking containers if hostNetwork(containerConfig) { - specgen.AddBindMount("/etc/hosts", "/etc/hosts", options) + mnt = rspec.Mount{ + Type: "bind", + Source: "/etc/hosts", + Destination: "/etc/hosts", + Options: append(options, "bind"), + } + specgen.AddMount(mnt) } // Set hostname and add env for hostname @@ -1132,7 +1213,13 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, sort.Sort(orderedMounts(mounts)) for _, m := range mounts { - specgen.AddBindMount(m.Source, m.Destination, m.Options) + mnt = rspec.Mount{ + Type: "bind", + Source: m.Source, + Destination: m.Destination, + Options: append(m.Options, "bind"), + } + specgen.AddMount(mnt) } if err := s.setupOCIHooks(&specgen, sb, containerConfig, 
processArgs[0]); err != nil { diff --git a/server/sandbox_run.go b/server/sandbox_run.go index 9e9ff487..0cf40ea8 100644 --- a/server/sandbox_run.go +++ b/server/sandbox_run.go @@ -210,8 +210,13 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if err := label.Relabel(resolvPath, mountLabel, true); err != nil && err != unix.ENOTSUP { return nil, err } - - g.AddBindMount(resolvPath, "/etc/resolv.conf", []string{"ro"}) + mnt := runtimespec.Mount{ + Type: "bind", + Source: resolvPath, + Destination: "/etc/resolv.conf", + Options: []string{"ro", "bind"}, + } + g.AddMount(mnt) } // add metadata @@ -480,7 +485,13 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest if err := label.Relabel(hostnamePath, mountLabel, true); err != nil && err != unix.ENOTSUP { return nil, err } - g.AddBindMount(hostnamePath, "/etc/hostname", []string{"ro"}) + mnt := runtimespec.Mount{ + Type: "bind", + Source: hostnamePath, + Destination: "/etc/hostname", + Options: []string{"ro", "bind"}, + } + g.AddMount(mnt) g.AddAnnotation(annotations.HostnamePath, hostnamePath) sb.AddHostnamePath(hostnamePath) diff --git a/vendor.conf b/vendor.conf index 6e8c533a..acd066aa 100644 --- a/vendor.conf +++ b/vendor.conf @@ -20,7 +20,7 @@ github.com/containernetworking/cni v0.4.0 google.golang.org/grpc v1.0.4 https://github.com/grpc/grpc-go github.com/opencontainers/selinux b29023b86e4a69d1b46b7e7b4e2b6fda03f0b9cd github.com/opencontainers/go-digest v1.0.0-rc0 -github.com/opencontainers/runtime-tools d3f7e9e9e631c7e87552d67dc7c86de33c3fb68a +github.com/opencontainers/runtime-tools 625e2322645b151a7cbb93a8b42920933e72167f github.com/opencontainers/runc 45bde006ca8c90e089894508708bcf0e2cdf9e13 github.com/mrunalp/fileutils master github.com/vishvananda/netlink master @@ -113,3 +113,6 @@ github.com/hashicorp/errwrap 7554cd9344cec97297fa6649b055a8c98c2a1e55 github.com/pquerna/ffjson d49c2bc1aa135aad0c6f4fc2056623ec78f5d5ac github.com/stretchr/testify 4d4bfba8f1d1027c4fdbe371823030df51419987 github.com/pmezard/go-difflib v1.0.0 +github.com/xeipuuv/gojsonreference master +github.com/xeipuuv/gojsonschema master +github.com/xeipuuv/gojsonpointer master diff --git a/vendor/github.com/opencontainers/runtime-tools/README.md b/vendor/github.com/opencontainers/runtime-tools/README.md index bbcafc26..5f16436b 100644 --- a/vendor/github.com/opencontainers/runtime-tools/README.md +++ b/vendor/github.com/opencontainers/runtime-tools/README.md @@ -8,7 +8,7 @@ To build from source code, runtime-tools requires Go 1.7.x or above. [`oci-runtime-tool generate`][generate.1] generates [configuration JSON][config.json] for an [OCI bundle][bundle]. [OCI-compatible runtimes][runtime-spec] like [runC][] expect to read the configuration from `config.json`. -```sh +```console $ oci-runtime-tool generate --output config.json $ cat config.json { @@ -22,7 +22,7 @@ $ cat config.json [`oci-runtime-tool validate`][validate.1] validates an OCI bundle. The error message will be printed if the OCI bundle failed the validation procedure. -```sh +```console $ oci-runtime-tool generate $ oci-runtime-tool validate INFO[0000] Bundle validation succeeded. @@ -30,55 +30,153 @@ INFO[0000] Bundle validation succeeded. 
## Testing OCI runtimes -```sh -$ sudo make RUNTIME=runc localvalidation -RUNTIME=runc go test -tags "" -v github.com/opencontainers/runtime-tools/validation -=== RUN TestValidateBasic +The runtime validation suite uses [node-tap][], which is packaged for some distributions (for example, it is in [Debian's `node-tap` package][debian-node-tap]). +If your distribution does not package node-tap, you can install [npm][] (for example, from [Gentoo's `nodejs` package][gentoo-nodejs]) and use it: + +```console +$ npm install tap +``` + +```console +$ make runtimetest validation-executables +RUNTIME=runc tap validation/linux_rootfs_propagation_shared.t validation/create.t validation/default.t validation/linux_readonly_paths.t validation/linux_masked_paths.t validation/mounts.t validation/process.t validation/root_readonly_false.t validation/linux_sysctl.t validation/linux_devices.t validation/linux_gid_mappings.t validation/process_oom_score_adj.t validation/process_capabilities.t validation/process_rlimits.t validation/root_readonly_true.t validation/linux_rootfs_propagation_unbindable.t validation/hostname.t validation/linux_uid_mappings.t +validation/linux_rootfs_propagation_shared.t ........ 18/19 + not ok rootfs propagation + +validation/create.t ................................... 4/4 +validation/default.t ................................ 19/19 +validation/linux_readonly_paths.t ................... 19/19 +validation/linux_masked_paths.t ..................... 18/19 + not ok masked paths + +validation/mounts.t ................................... 0/1 + Skipped: 1 + TODO: mounts generation options have not been implemented + +validation/process.t ................................ 19/19 +validation/root_readonly_false.t .................... 19/19 +validation/linux_sysctl.t ........................... 19/19 +validation/linux_devices.t .......................... 19/19 +validation/linux_gid_mappings.t ..................... 18/19 + not ok gid mappings + +validation/process_oom_score_adj.t .................. 19/19 +validation/process_capabilities.t ................... 19/19 +validation/process_rlimits.t ........................ 19/19 +validation/root_readonly_true.t ...................failed to create the container +rootfsPropagation=unbindable is not supported +exit status 1 +validation/root_readonly_true.t ..................... 19/19 +validation/linux_rootfs_propagation_unbindable.t ...... 0/1 + not ok validation/linux_rootfs_propagation_unbindable.t + timeout: 30000 + file: validation/linux_rootfs_propagation_unbindable.t + command: validation/linux_rootfs_propagation_unbindable.t + args: [] + stdio: + - 0 + - pipe + - 2 + cwd: /…/go/src/github.com/opencontainers/runtime-tools + exitCode: 1 + +validation/hostname.t ...................failed to create the container +User namespace mappings specified, but USER namespace isn't enabled in the config +exit status 1 +validation/hostname.t ............................... 19/19 +validation/linux_uid_mappings.t ....................... 0/1 + not ok validation/linux_uid_mappings.t + timeout: 30000 + file: validation/linux_uid_mappings.t + command: validation/linux_uid_mappings.t + args: [] + stdio: + - 0 + - pipe + - 2 + cwd: /…/go/src/github.com/opencontainers/runtime-tools + exitCode: 1 + +total ............................................. 
267/273 + + + 267 passing (31s) + 1 pending + 5 failing + +make: *** [Makefile:43: localvalidation] Error 1 +``` + +You can also run an individual test executable directly: + +```console +$ RUNTIME=runc validation/default.t TAP version 13 ok 1 - root filesystem ok 2 - hostname -ok 3 - mounts -ok 4 - capabilities -ok 5 - default symlinks -ok 6 - default devices -ok 7 - linux devices -ok 8 - linux process -ok 9 - masked paths -ok 10 - oom score adj -ok 11 - read only paths -ok 12 - rlimits -ok 13 - sysctls -ok 14 - uid mappings -ok 15 - gid mappings -1..15 ---- PASS: TestValidateBasic (0.08s) -=== RUN TestValidateSysctls -TAP version 13 -ok 1 - root filesystem -ok 2 - hostname -ok 3 - mounts -ok 4 - capabilities -ok 5 - default symlinks -ok 6 - default devices -ok 7 - linux devices -ok 8 - linux process -ok 9 - masked paths -ok 10 - oom score adj -ok 11 - read only paths -ok 12 - rlimits -ok 13 - sysctls -ok 14 - uid mappings -ok 15 - gid mappings -1..15 ---- PASS: TestValidateSysctls (0.20s) -PASS -ok github.com/opencontainers/runtime-tools/validation 0.281s +ok 3 - process +ok 4 - mounts +ok 5 - user +ok 6 - rlimits +ok 7 - capabilities +ok 8 - default symlinks +ok 9 - default file system +ok 10 - default devices +ok 11 - linux devices +ok 12 - linux process +ok 13 - masked paths +ok 14 - oom score adj +ok 15 - read only paths +ok 16 - rootfs propagation +ok 17 - sysctls +ok 18 - uid mappings +ok 19 - gid mappings +1..19 +``` + +If you cannot install node-tap, you can probably run the test suite with another [TAP consumer][tap-consumers]. +For example, with [`prove`][prove]: + +```console +$ sudo make TAP='prove -Q -j9' RUNTIME=runc localvalidation +RUNTIME=runc prove -Q -j9 validation/linux_rootfs_propagation_shared.t validation/create.t validation/default.t validation/linux_readonly_paths.t validation/linux_masked_paths.t validation/mounts.t validation/process.t validation/root_readonly_false.t validation/linux_sysctl.t validation/linux_devices.t validation/linux_gid_mappings.t validation/process_oom_score_adj.t validation/process_capabilities.t validation/process_rlimits.t validation/root_readonly_true.t validation/linux_rootfs_propagation_unbindable.t validation/hostname.t validation/linux_uid_mappings.t +failed to create the container +rootfsPropagation=unbindable is not supported +exit status 1 +failed to create the container +User namespace mappings specified, but USER namespace isn't enabled in the config +exit status 1 + +Test Summary Report +------------------- +validation/linux_rootfs_propagation_shared.t (Wstat: 0 Tests: 19 Failed: 1) + Failed test: 16 +validation/linux_masked_paths.t (Wstat: 0 Tests: 19 Failed: 1) + Failed test: 13 +validation/linux_rootfs_propagation_unbindable.t (Wstat: 256 Tests: 0 Failed: 0) + Non-zero exit status: 1 + Parse errors: No plan found in TAP output +validation/linux_uid_mappings.t (Wstat: 256 Tests: 0 Failed: 0) + Non-zero exit status: 1 + Parse errors: No plan found in TAP output +validation/linux_gid_mappings.t (Wstat: 0 Tests: 19 Failed: 1) + Failed test: 19 +Files=18, Tests=271, 6 wallclock secs ( 0.06 usr 0.01 sys + 0.59 cusr 0.24 csys = 0.90 CPU) +Result: FAIL +make: *** [Makefile:43: localvalidation] Error 1 ``` [bundle]: https://github.com/opencontainers/runtime-spec/blob/master/bundle.md [config.json]: https://github.com/opencontainers/runtime-spec/blob/master/config.md +[debian-node-tap]: https://packages.debian.org/stretch/node-tap +[debian-nodejs]: https://packages.debian.org/stretch/nodejs +[gentoo-nodejs]: 
https://packages.gentoo.org/packages/net-libs/nodejs +[node-tap]: http://www.node-tap.org/ +[npm]: https://www.npmjs.com/ +[prove]: http://search.cpan.org/~leont/Test-Harness-3.39/bin/prove [runC]: https://github.com/opencontainers/runc [runtime-spec]: https://github.com/opencontainers/runtime-spec +[tap-consumers]: https://testanything.org/consumers.html [generate.1]: man/oci-runtime-tool-generate.1.md [validate.1]: man/oci-runtime-tool-validate.1.md diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/abs.go b/vendor/github.com/opencontainers/runtime-tools/filepath/abs.go new file mode 100644 index 00000000..e4ab7453 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/filepath/abs.go @@ -0,0 +1,48 @@ +package filepath + +import ( + "regexp" + "strings" +) + +var windowsAbs = regexp.MustCompile(`^[a-zA-Z]:\\.*$`) + +// Abs is a version of path/filepath's Abs with an explicit operating +// system and current working directory. +func Abs(os, path, cwd string) (_ string, err error) { + if IsAbs(os, path) { + return Clean(os, path), nil + } + return Clean(os, Join(os, cwd, path)), nil +} + +// IsAbs is a version of path/filepath's IsAbs with an explicit +// operating system. +func IsAbs(os, path string) bool { + if os == "windows" { + // FIXME: copy hideous logic from Go's + // src/path/filepath/path_windows.go into somewhere where we can + // put 3-clause BSD licensed code. + return windowsAbs.MatchString(path) + } + sep := Separator(os) + + // POSIX has [1]: + // + // > If a pathname begins with two successive characters, + // > the first component following the leading characters + // > may be interpreted in an implementation-defined manner, + // > although more than two leading characters shall be + // > treated as a single character. + // + // And Boost treats // as non-absolute [2], but Linux [3,4], Python + // [5] and Go [6] all treat // as absolute. + // + // [1]: http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap04.html#tag_04_13 + // [2]: https://github.com/boostorg/filesystem/blob/boost-1.64.0/test/path_test.cpp#L861 + // [3]: http://man7.org/linux/man-pages/man7/path_resolution.7.html + // [4]: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/path-lookup.md?h=v4.12#n41 + // [5]: https://github.com/python/cpython/blob/v3.6.1/Lib/posixpath.py#L64-L66 + // [6]: https://go.googlesource.com/go/+/go1.8.3/src/path/path.go#199 + return strings.HasPrefix(path, string(sep)) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go b/vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go new file mode 100644 index 00000000..896cd820 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/filepath/ancestor.go @@ -0,0 +1,32 @@ +package filepath + +import ( + "fmt" + "strings" +) + +// IsAncestor returns true when pathB is an strict ancestor of pathA, +// and false where the paths are equal or pathB is outside of pathA. +// Paths that are not absolute will be made absolute with Abs. 
+func IsAncestor(os, pathA, pathB, cwd string) (_ bool, err error) { + if pathA == pathB { + return false, nil + } + + pathA, err = Abs(os, pathA, cwd) + if err != nil { + return false, err + } + pathB, err = Abs(os, pathB, cwd) + if err != nil { + return false, err + } + sep := Separator(os) + if !strings.HasSuffix(pathA, string(sep)) { + pathA = fmt.Sprintf("%s%c", pathA, sep) + } + if pathA == pathB { + return false, nil + } + return strings.HasPrefix(pathB, pathA), nil +} diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/clean.go b/vendor/github.com/opencontainers/runtime-tools/filepath/clean.go new file mode 100644 index 00000000..d5dd65ae --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/filepath/clean.go @@ -0,0 +1,74 @@ +package filepath + +import ( + "fmt" + "strings" +) + +// Clean is an explicit-OS version of path/filepath's Clean. +func Clean(os, path string) string { + abs := IsAbs(os, path) + sep := Separator(os) + elements := strings.Split(path, string(sep)) + + // Replace multiple Separator elements with a single one. + for i := 0; i < len(elements); i++ { + if len(elements[i]) == 0 { + elements = append(elements[:i], elements[i+1:]...) + i-- + } + } + + // Eliminate each . path name element (the current directory). + for i := 0; i < len(elements); i++ { + if elements[i] == "." && len(elements) > 1 { + elements = append(elements[:i], elements[i+1:]...) + i-- + } + } + + // Eliminate each inner .. path name element (the parent directory) + // along with the non-.. element that precedes it. + for i := 1; i < len(elements); i++ { + if i == 1 && abs && sep == '\\' { + continue + } + if i > 0 && elements[i] == ".." { + elements = append(elements[:i-1], elements[i+1:]...) + i -= 2 + } + } + + // Eliminate .. elements that begin a rooted path: + // that is, replace "/.." by "/" at the beginning of a path, + // assuming Separator is '/'. + offset := 0 + if sep == '\\' { + offset = 1 + } + if abs { + for len(elements) > offset && elements[offset] == ".." { + elements = append(elements[:offset], elements[offset+1:]...) + } + } + + cleaned := strings.Join(elements, string(sep)) + if abs { + if sep == '/' { + cleaned = fmt.Sprintf("%c%s", sep, cleaned) + } else if len(elements) == 1 { + cleaned = fmt.Sprintf("%s%c", cleaned, sep) + } + } + + // If the result of this process is an empty string, Clean returns + // the string ".". + if len(cleaned) == 0 { + cleaned = "." + } + + if cleaned == path { + return path + } + return Clean(os, cleaned) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/doc.go b/vendor/github.com/opencontainers/runtime-tools/filepath/doc.go new file mode 100644 index 00000000..7ee085bf --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/filepath/doc.go @@ -0,0 +1,6 @@ +// Package filepath implements Go's filepath package with explicit +// operating systems (and for some functions and explicit working +// directory). This allows tools built for one OS to operate on paths +// targeting another OS. For example, a Linux build can determine +// whether a path is absolute on Linux or on Windows. +package filepath diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/join.go b/vendor/github.com/opencontainers/runtime-tools/filepath/join.go new file mode 100644 index 00000000..b865d237 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/filepath/join.go @@ -0,0 +1,9 @@ +package filepath + +import "strings" + +// Join is an explicit-OS version of path/filepath's Join. 
+func Join(os string, elem ...string) string { + sep := Separator(os) + return Clean(os, strings.Join(elem, string(sep))) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/filepath/separator.go b/vendor/github.com/opencontainers/runtime-tools/filepath/separator.go new file mode 100644 index 00000000..2c5e8905 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/filepath/separator.go @@ -0,0 +1,9 @@ +package filepath + +// Separator is an explicit-OS version of path/filepath's Separator. +func Separator(os string) rune { + if os == "windows" { + return '\\' + } + return '/' +} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go index fce88f5e..d2951b52 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/generate.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/generate.go @@ -17,6 +17,12 @@ import ( var ( // Namespaces include the names of supported namespaces. Namespaces = []string{"network", "pid", "mount", "ipc", "uts", "user", "cgroup"} + + // we don't care about order...and this is way faster... + removeFunc = func(s []string, i int) []string { + s[i] = s[len(s)-1] + return s[:len(s)-1] + } ) // Generator represents a generator for a container spec. @@ -35,7 +41,7 @@ func New() Generator { spec := rspec.Spec{ Version: rspec.Version, Root: &rspec.Root{ - Path: "", + Path: "rootfs", Readonly: false, }, Process: &rspec.Process{ @@ -227,6 +233,7 @@ func NewFromFile(path string) (Generator, error) { if os.IsNotExist(err) { return Generator{}, fmt.Errorf("template configuration at %s not found", path) } + return Generator{}, err } defer cf.Close() @@ -392,7 +399,7 @@ func (g *Generator) SetProcessArgs(args []string) { // ClearProcessEnv clears g.spec.Process.Env. func (g *Generator) ClearProcessEnv() { - if g.spec == nil { + if g.spec == nil || g.spec.Process == nil { return } g.spec.Process.Env = []string{} @@ -433,22 +440,21 @@ func (g *Generator) AddProcessRlimits(rType string, rHard uint64, rSoft uint64) } // RemoveProcessRlimits removes a rlimit from g.spec.Process.Rlimits. -func (g *Generator) RemoveProcessRlimits(rType string) error { - if g.spec == nil { - return nil +func (g *Generator) RemoveProcessRlimits(rType string) { + if g.spec == nil || g.spec.Process == nil { + return } for i, rlimit := range g.spec.Process.Rlimits { if rlimit.Type == rType { g.spec.Process.Rlimits = append(g.spec.Process.Rlimits[:i], g.spec.Process.Rlimits[i+1:]...) - return nil + return } } - return nil } // ClearProcessRlimits clear g.spec.Process.Rlimits. func (g *Generator) ClearProcessRlimits() { - if g.spec == nil { + if g.spec == nil || g.spec.Process == nil { return } g.spec.Process.Rlimits = []rspec.POSIXRlimit{} @@ -456,7 +462,7 @@ func (g *Generator) ClearProcessRlimits() { // ClearProcessAdditionalGids clear g.spec.Process.AdditionalGids. func (g *Generator) ClearProcessAdditionalGids() { - if g.spec == nil { + if g.spec == nil || g.spec.Process == nil { return } g.spec.Process.User.AdditionalGids = []uint32{} @@ -485,6 +491,12 @@ func (g *Generator) SetLinuxCgroupsPath(path string) { g.spec.Linux.CgroupsPath = path } +// SetLinuxIntelRdtL3CacheSchema sets g.spec.Linux.IntelRdt.L3CacheSchema +func (g *Generator) SetLinuxIntelRdtL3CacheSchema(schema string) { + g.initSpecLinuxIntelRdt() + g.spec.Linux.IntelRdt.L3CacheSchema = schema +} + // SetLinuxMountLabel sets g.spec.Linux.MountLabel. 
func (g *Generator) SetLinuxMountLabel(label string) { g.initSpecLinux() @@ -497,6 +509,162 @@ func (g *Generator) SetProcessOOMScoreAdj(adj int) { g.spec.Process.OOMScoreAdj = &adj } +// SetLinuxResourcesBlockIOLeafWeight sets g.spec.Linux.Resources.BlockIO.LeafWeight. +func (g *Generator) SetLinuxResourcesBlockIOLeafWeight(weight uint16) { + g.initSpecLinuxResourcesBlockIO() + g.spec.Linux.Resources.BlockIO.LeafWeight = &weight +} + +// AddLinuxResourcesBlockIOLeafWeightDevice adds or sets g.spec.Linux.Resources.BlockIO.WeightDevice.LeafWeight. +func (g *Generator) AddLinuxResourcesBlockIOLeafWeightDevice(major int64, minor int64, weight uint16) { + g.initSpecLinuxResourcesBlockIO() + for i, weightDevice := range g.spec.Linux.Resources.BlockIO.WeightDevice { + if weightDevice.Major == major && weightDevice.Minor == minor { + g.spec.Linux.Resources.BlockIO.WeightDevice[i].LeafWeight = &weight + return + } + } + weightDevice := new(rspec.LinuxWeightDevice) + weightDevice.Major = major + weightDevice.Minor = minor + weightDevice.LeafWeight = &weight + g.spec.Linux.Resources.BlockIO.WeightDevice = append(g.spec.Linux.Resources.BlockIO.WeightDevice, *weightDevice) +} + +// DropLinuxResourcesBlockIOLeafWeightDevice drops a item form g.spec.Linux.Resources.BlockIO.WeightDevice.LeafWeight +func (g *Generator) DropLinuxResourcesBlockIOLeafWeightDevice(major int64, minor int64) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.BlockIO == nil { + return + } + + for i, weightDevice := range g.spec.Linux.Resources.BlockIO.WeightDevice { + if weightDevice.Major == major && weightDevice.Minor == minor { + if weightDevice.Weight != nil { + newWeightDevice := new(rspec.LinuxWeightDevice) + newWeightDevice.Major = major + newWeightDevice.Minor = minor + newWeightDevice.Weight = weightDevice.Weight + g.spec.Linux.Resources.BlockIO.WeightDevice[i] = *newWeightDevice + } else { + g.spec.Linux.Resources.BlockIO.WeightDevice = append(g.spec.Linux.Resources.BlockIO.WeightDevice[:i], g.spec.Linux.Resources.BlockIO.WeightDevice[i+1:]...) + } + return + } + } +} + +// SetLinuxResourcesBlockIOWeight sets g.spec.Linux.Resources.BlockIO.Weight. +func (g *Generator) SetLinuxResourcesBlockIOWeight(weight uint16) { + g.initSpecLinuxResourcesBlockIO() + g.spec.Linux.Resources.BlockIO.Weight = &weight +} + +// AddLinuxResourcesBlockIOWeightDevice adds or sets g.spec.Linux.Resources.BlockIO.WeightDevice.Weight. 
+func (g *Generator) AddLinuxResourcesBlockIOWeightDevice(major int64, minor int64, weight uint16) { + g.initSpecLinuxResourcesBlockIO() + for i, weightDevice := range g.spec.Linux.Resources.BlockIO.WeightDevice { + if weightDevice.Major == major && weightDevice.Minor == minor { + g.spec.Linux.Resources.BlockIO.WeightDevice[i].Weight = &weight + return + } + } + weightDevice := new(rspec.LinuxWeightDevice) + weightDevice.Major = major + weightDevice.Minor = minor + weightDevice.Weight = &weight + g.spec.Linux.Resources.BlockIO.WeightDevice = append(g.spec.Linux.Resources.BlockIO.WeightDevice, *weightDevice) +} + +// DropLinuxResourcesBlockIOWeightDevice drops a item form g.spec.Linux.Resources.BlockIO.WeightDevice.Weight +func (g *Generator) DropLinuxResourcesBlockIOWeightDevice(major int64, minor int64) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.BlockIO == nil { + return + } + + for i, weightDevice := range g.spec.Linux.Resources.BlockIO.WeightDevice { + if weightDevice.Major == major && weightDevice.Minor == minor { + if weightDevice.LeafWeight != nil { + newWeightDevice := new(rspec.LinuxWeightDevice) + newWeightDevice.Major = major + newWeightDevice.Minor = minor + newWeightDevice.LeafWeight = weightDevice.LeafWeight + g.spec.Linux.Resources.BlockIO.WeightDevice[i] = *newWeightDevice + } else { + g.spec.Linux.Resources.BlockIO.WeightDevice = append(g.spec.Linux.Resources.BlockIO.WeightDevice[:i], g.spec.Linux.Resources.BlockIO.WeightDevice[i+1:]...) + } + return + } + } +} + +// AddLinuxResourcesBlockIOThrottleReadBpsDevice adds or sets g.spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice. +func (g *Generator) AddLinuxResourcesBlockIOThrottleReadBpsDevice(major int64, minor int64, rate uint64) { + g.initSpecLinuxResourcesBlockIO() + throttleDevices := addOrReplaceBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice, major, minor, rate) + g.spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice = throttleDevices +} + +// DropLinuxResourcesBlockIOThrottleReadBpsDevice drops a item from g.spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice. +func (g *Generator) DropLinuxResourcesBlockIOThrottleReadBpsDevice(major int64, minor int64) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.BlockIO == nil { + return + } + + throttleDevices := dropBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice, major, minor) + g.spec.Linux.Resources.BlockIO.ThrottleReadBpsDevice = throttleDevices +} + +// AddLinuxResourcesBlockIOThrottleReadIOPSDevice adds or sets g.spec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice. +func (g *Generator) AddLinuxResourcesBlockIOThrottleReadIOPSDevice(major int64, minor int64, rate uint64) { + g.initSpecLinuxResourcesBlockIO() + throttleDevices := addOrReplaceBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice, major, minor, rate) + g.spec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice = throttleDevices +} + +// DropLinuxResourcesBlockIOThrottleReadIOPSDevice drops a item from g.spec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice. 
+func (g *Generator) DropLinuxResourcesBlockIOThrottleReadIOPSDevice(major int64, minor int64) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.BlockIO == nil { + return + } + + throttleDevices := dropBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice, major, minor) + g.spec.Linux.Resources.BlockIO.ThrottleReadIOPSDevice = throttleDevices +} + +// AddLinuxResourcesBlockIOThrottleWriteBpsDevice adds or sets g.spec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice. +func (g *Generator) AddLinuxResourcesBlockIOThrottleWriteBpsDevice(major int64, minor int64, rate uint64) { + g.initSpecLinuxResourcesBlockIO() + throttleDevices := addOrReplaceBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice, major, minor, rate) + g.spec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice = throttleDevices +} + +// DropLinuxResourcesBlockIOThrottleWriteBpsDevice drops a item from g.spec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice. +func (g *Generator) DropLinuxResourcesBlockIOThrottleWriteBpsDevice(major int64, minor int64) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.BlockIO == nil { + return + } + + throttleDevices := dropBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice, major, minor) + g.spec.Linux.Resources.BlockIO.ThrottleWriteBpsDevice = throttleDevices +} + +// AddLinuxResourcesBlockIOThrottleWriteIOPSDevice adds or sets g.spec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice. +func (g *Generator) AddLinuxResourcesBlockIOThrottleWriteIOPSDevice(major int64, minor int64, rate uint64) { + g.initSpecLinuxResourcesBlockIO() + throttleDevices := addOrReplaceBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice, major, minor, rate) + g.spec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice = throttleDevices +} + +// DropLinuxResourcesBlockIOThrottleWriteIOPSDevice drops a item from g.spec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice. +func (g *Generator) DropLinuxResourcesBlockIOThrottleWriteIOPSDevice(major int64, minor int64) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.BlockIO == nil { + return + } + + throttleDevices := dropBlockIOThrottleDevice(g.spec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice, major, minor) + g.spec.Linux.Resources.BlockIO.ThrottleWriteIOPSDevice = throttleDevices +} + // SetLinuxResourcesCPUShares sets g.spec.Linux.Resources.CPU.Shares. func (g *Generator) SetLinuxResourcesCPUShares(shares uint64) { g.initSpecLinuxResourcesCPU() @@ -557,16 +725,17 @@ func (g *Generator) AddLinuxResourcesHugepageLimit(pageSize string, limit uint64 } // DropLinuxResourcesHugepageLimit drops a hugepage limit from g.spec.Linux.Resources.HugepageLimits. -func (g *Generator) DropLinuxResourcesHugepageLimit(pageSize string) error { - g.initSpecLinuxResources() +func (g *Generator) DropLinuxResourcesHugepageLimit(pageSize string) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil { + return + } + for i, pageLimit := range g.spec.Linux.Resources.HugepageLimits { if pageLimit.Pagesize == pageSize { g.spec.Linux.Resources.HugepageLimits = append(g.spec.Linux.Resources.HugepageLimits[:i], g.spec.Linux.Resources.HugepageLimits[i+1:]...) - return nil + return } } - - return nil } // SetLinuxResourcesMemoryLimit sets g.spec.Linux.Resources.Memory.Limit. 
@@ -634,7 +803,10 @@ func (g *Generator) AddLinuxResourcesNetworkPriorities(name string, prio uint32) // DropLinuxResourcesNetworkPriorities drops one item from g.spec.Linux.Resources.Network.Priorities. func (g *Generator) DropLinuxResourcesNetworkPriorities(name string) { - g.initSpecLinuxResourcesNetwork() + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil || g.spec.Linux.Resources.Network == nil { + return + } + for i, netPriority := range g.spec.Linux.Resources.Network.Priorities { if netPriority.Name == name { g.spec.Linux.Resources.Network.Priorities = append(g.spec.Linux.Resources.Network.Priorities[:i], g.spec.Linux.Resources.Network.Priorities[i+1:]...) @@ -721,8 +893,10 @@ func (g *Generator) SetLinuxRootPropagation(rp string) error { case "rslave": case "shared": case "rshared": + case "unbindable": + case "runbindable": default: - return fmt.Errorf("rootfs-propagation must be empty or one of private|rprivate|slave|rslave|shared|rshared") + return fmt.Errorf("rootfs-propagation %q must be empty or one of (r)private|(r)slave|(r)shared|(r)unbindable", rp) } g.initSpecLinux() g.spec.Linux.RootfsPropagation = rp @@ -731,217 +905,99 @@ func (g *Generator) SetLinuxRootPropagation(rp string) error { // ClearPreStartHooks clear g.spec.Hooks.Prestart. func (g *Generator) ClearPreStartHooks() { - if g.spec == nil { - return - } - if g.spec.Hooks == nil { + if g.spec == nil || g.spec.Hooks == nil { return } g.spec.Hooks.Prestart = []rspec.Hook{} } // AddPreStartHook add a prestart hook into g.spec.Hooks.Prestart. -func (g *Generator) AddPreStartHook(path string, args []string) { - g.initSpecHooks() - hook := rspec.Hook{Path: path, Args: args} - for i, hook := range g.spec.Hooks.Prestart { - if hook.Path == path { - g.spec.Hooks.Prestart[i] = hook - return - } - } - g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook) -} - -// AddPreStartHookEnv adds envs of a prestart hook into g.spec.Hooks.Prestart. -func (g *Generator) AddPreStartHookEnv(path string, envs []string) { +func (g *Generator) AddPreStartHook(preStartHook rspec.Hook) error { g.initSpecHooks() for i, hook := range g.spec.Hooks.Prestart { - if hook.Path == path { - g.spec.Hooks.Prestart[i].Env = envs - return + if hook.Path == preStartHook.Path { + g.spec.Hooks.Prestart[i] = preStartHook + return nil } } - hook := rspec.Hook{Path: path, Env: envs} - g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook) -} - -// AddPreStartHookTimeout adds timeout of a prestart hook into g.spec.Hooks.Prestart. -func (g *Generator) AddPreStartHookTimeout(path string, timeout int) { - g.initSpecHooks() - for i, hook := range g.spec.Hooks.Prestart { - if hook.Path == path { - g.spec.Hooks.Prestart[i].Timeout = &timeout - return - } - } - hook := rspec.Hook{Path: path, Timeout: &timeout} - g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, hook) + g.spec.Hooks.Prestart = append(g.spec.Hooks.Prestart, preStartHook) + return nil } // ClearPostStopHooks clear g.spec.Hooks.Poststop. func (g *Generator) ClearPostStopHooks() { - if g.spec == nil { - return - } - if g.spec.Hooks == nil { + if g.spec == nil || g.spec.Hooks == nil { return } g.spec.Hooks.Poststop = []rspec.Hook{} } // AddPostStopHook adds a poststop hook into g.spec.Hooks.Poststop. 
-func (g *Generator) AddPostStopHook(path string, args []string) { - g.initSpecHooks() - hook := rspec.Hook{Path: path, Args: args} - for i, hook := range g.spec.Hooks.Poststop { - if hook.Path == path { - g.spec.Hooks.Poststop[i] = hook - return - } - } - g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook) -} - -// AddPostStopHookEnv adds envs of a poststop hook into g.spec.Hooks.Poststop. -func (g *Generator) AddPostStopHookEnv(path string, envs []string) { +func (g *Generator) AddPostStopHook(postStopHook rspec.Hook) error { g.initSpecHooks() for i, hook := range g.spec.Hooks.Poststop { - if hook.Path == path { - g.spec.Hooks.Poststop[i].Env = envs - return + if hook.Path == postStopHook.Path { + g.spec.Hooks.Poststop[i] = postStopHook + return nil } } - hook := rspec.Hook{Path: path, Env: envs} - g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook) -} - -// AddPostStopHookTimeout adds timeout of a poststop hook into g.spec.Hooks.Poststop. -func (g *Generator) AddPostStopHookTimeout(path string, timeout int) { - g.initSpecHooks() - for i, hook := range g.spec.Hooks.Poststop { - if hook.Path == path { - g.spec.Hooks.Poststop[i].Timeout = &timeout - return - } - } - hook := rspec.Hook{Path: path, Timeout: &timeout} - g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, hook) + g.spec.Hooks.Poststop = append(g.spec.Hooks.Poststop, postStopHook) + return nil } // ClearPostStartHooks clear g.spec.Hooks.Poststart. func (g *Generator) ClearPostStartHooks() { - if g.spec == nil { - return - } - if g.spec.Hooks == nil { + if g.spec == nil || g.spec.Hooks == nil { return } g.spec.Hooks.Poststart = []rspec.Hook{} } // AddPostStartHook adds a poststart hook into g.spec.Hooks.Poststart. -func (g *Generator) AddPostStartHook(path string, args []string) { - g.initSpecHooks() - hook := rspec.Hook{Path: path, Args: args} - for i, hook := range g.spec.Hooks.Poststart { - if hook.Path == path { - g.spec.Hooks.Poststart[i] = hook - return - } - } - g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook) -} - -// AddPostStartHookEnv adds envs of a poststart hook into g.spec.Hooks.Poststart. -func (g *Generator) AddPostStartHookEnv(path string, envs []string) { +func (g *Generator) AddPostStartHook(postStartHook rspec.Hook) error { g.initSpecHooks() for i, hook := range g.spec.Hooks.Poststart { - if hook.Path == path { - g.spec.Hooks.Poststart[i].Env = envs - return + if hook.Path == postStartHook.Path { + g.spec.Hooks.Poststart[i] = postStartHook + return nil } } - hook := rspec.Hook{Path: path, Env: envs} - g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook) -} - -// AddPostStartHookTimeout adds timeout of a poststart hook into g.spec.Hooks.Poststart. -func (g *Generator) AddPostStartHookTimeout(path string, timeout int) { - g.initSpecHooks() - for i, hook := range g.spec.Hooks.Poststart { - if hook.Path == path { - g.spec.Hooks.Poststart[i].Timeout = &timeout - return - } - } - hook := rspec.Hook{Path: path, Timeout: &timeout} - g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, hook) -} - -// AddTmpfsMount adds a tmpfs mount into g.spec.Mounts. -func (g *Generator) AddTmpfsMount(dest string, options []string) { - mnt := rspec.Mount{ - Destination: dest, - Type: "tmpfs", - Source: "tmpfs", - Options: options, - } - - g.initSpec() - g.spec.Mounts = append(g.spec.Mounts, mnt) -} - -// AddCgroupsMount adds a cgroup mount into g.spec.Mounts. 
-func (g *Generator) AddCgroupsMount(mountCgroupOption string) error { - switch mountCgroupOption { - case "ro": - case "rw": - case "no": - return nil - default: - return fmt.Errorf("--mount-cgroups should be one of (ro,rw,no)") - } - - mnt := rspec.Mount{ - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"nosuid", "noexec", "nodev", "relatime", mountCgroupOption}, - } - g.initSpec() - g.spec.Mounts = append(g.spec.Mounts, mnt) - + g.spec.Hooks.Poststart = append(g.spec.Hooks.Poststart, postStartHook) return nil } -// AddBindMount adds a bind mount into g.spec.Mounts. -func (g *Generator) AddBindMount(source, dest string, options []string) { - if len(options) == 0 { - options = []string{"rw"} - } +// AddMount adds a mount into g.spec.Mounts. +func (g *Generator) AddMount(mnt rspec.Mount) { + g.initSpec() - // We have to make sure that there is a bind option set, otherwise it won't - // be an actual bindmount. - foundBindOption := false - for _, opt := range options { - if opt == "bind" || opt == "rbind" { - foundBindOption = true - break + g.spec.Mounts = append(g.spec.Mounts, mnt) +} + +// RemoveMount removes a mount point on the dest directory +func (g *Generator) RemoveMount(dest string) { + g.initSpec() + + for index, mount := range g.spec.Mounts { + if mount.Destination == dest { + g.spec.Mounts = append(g.spec.Mounts[:index], g.spec.Mounts[index+1:]...) + return } } - if !foundBindOption { - options = append(options, "bind") - } +} - mnt := rspec.Mount{ - Destination: dest, - Type: "bind", - Source: source, - Options: options, - } +// Mounts returns the list of mounts +func (g *Generator) Mounts() []rspec.Mount { g.initSpec() - g.spec.Mounts = append(g.spec.Mounts, mnt) + + return g.spec.Mounts +} + +// ClearMounts clear g.spec.Mounts +func (g *Generator) ClearMounts() { + if g.spec == nil { + return + } + g.spec.Mounts = []rspec.Mount{} } // SetupPrivileged sets up the privilege-related fields inside g.spec. @@ -970,7 +1026,7 @@ func (g *Generator) SetupPrivileged(privileged bool) { // ClearProcessCapabilities clear g.spec.Process.Capabilities. func (g *Generator) ClearProcessCapabilities() { - if g.spec == nil { + if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil { return } g.spec.Process.Capabilities.Bounding = []string{} @@ -980,8 +1036,32 @@ func (g *Generator) ClearProcessCapabilities() { g.spec.Process.Capabilities.Ambient = []string{} } -// AddProcessCapability adds a process capability into g.spec.Process.Capabilities. -func (g *Generator) AddProcessCapability(c string) error { +// AddProcessCapabilityAmbient adds a process capability into g.spec.Process.Capabilities.Ambient. +func (g *Generator) AddProcessCapabilityAmbient(c string) error { + cp := strings.ToUpper(c) + if err := validate.CapValid(cp, g.HostSpecific); err != nil { + return err + } + + g.initSpecProcessCapabilities() + + var foundAmbient bool + for _, cap := range g.spec.Process.Capabilities.Ambient { + if strings.ToUpper(cap) == cp { + foundAmbient = true + break + } + } + + if !foundAmbient { + g.spec.Process.Capabilities.Ambient = append(g.spec.Process.Capabilities.Ambient, cp) + } + + return nil +} + +// AddProcessCapabilityBounding adds a process capability into g.spec.Process.Capabilities.Bounding. 
+func (g *Generator) AddProcessCapabilityBounding(c string) error { cp := strings.ToUpper(c) if err := validate.CapValid(cp, g.HostSpecific); err != nil { return err @@ -1000,6 +1080,18 @@ func (g *Generator) AddProcessCapability(c string) error { g.spec.Process.Capabilities.Bounding = append(g.spec.Process.Capabilities.Bounding, cp) } + return nil +} + +// AddProcessCapabilityEffective adds a process capability into g.spec.Process.Capabilities.Effective. +func (g *Generator) AddProcessCapabilityEffective(c string) error { + cp := strings.ToUpper(c) + if err := validate.CapValid(cp, g.HostSpecific); err != nil { + return err + } + + g.initSpecProcessCapabilities() + var foundEffective bool for _, cap := range g.spec.Process.Capabilities.Effective { if strings.ToUpper(cap) == cp { @@ -1011,6 +1103,18 @@ func (g *Generator) AddProcessCapability(c string) error { g.spec.Process.Capabilities.Effective = append(g.spec.Process.Capabilities.Effective, cp) } + return nil +} + +// AddProcessCapabilityInheritable adds a process capability into g.spec.Process.Capabilities.Inheritable. +func (g *Generator) AddProcessCapabilityInheritable(c string) error { + cp := strings.ToUpper(c) + if err := validate.CapValid(cp, g.HostSpecific); err != nil { + return err + } + + g.initSpecProcessCapabilities() + var foundInheritable bool for _, cap := range g.spec.Process.Capabilities.Inheritable { if strings.ToUpper(cap) == cp { @@ -1022,6 +1126,18 @@ func (g *Generator) AddProcessCapability(c string) error { g.spec.Process.Capabilities.Inheritable = append(g.spec.Process.Capabilities.Inheritable, cp) } + return nil +} + +// AddProcessCapabilityPermitted adds a process capability into g.spec.Process.Capabilities.Permitted. +func (g *Generator) AddProcessCapabilityPermitted(c string) error { + cp := strings.ToUpper(c) + if err := validate.CapValid(cp, g.HostSpecific); err != nil { + return err + } + + g.initSpecProcessCapabilities() + var foundPermitted bool for _, cap := range g.spec.Process.Capabilities.Permitted { if strings.ToUpper(cap) == cp { @@ -1033,66 +1149,87 @@ func (g *Generator) AddProcessCapability(c string) error { g.spec.Process.Capabilities.Permitted = append(g.spec.Process.Capabilities.Permitted, cp) } - var foundAmbient bool - for _, cap := range g.spec.Process.Capabilities.Ambient { - if strings.ToUpper(cap) == cp { - foundAmbient = true - break - } - } - if !foundAmbient { - g.spec.Process.Capabilities.Ambient = append(g.spec.Process.Capabilities.Ambient, cp) - } - return nil } -// DropProcessCapability drops a process capability from g.spec.Process.Capabilities. -func (g *Generator) DropProcessCapability(c string) error { +// DropProcessCapabilityAmbient drops a process capability from g.spec.Process.Capabilities.Ambient. +func (g *Generator) DropProcessCapabilityAmbient(c string) error { + if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil { + return nil + } + cp := strings.ToUpper(c) - if err := validate.CapValid(cp, g.HostSpecific); err != nil { - return err - } - - g.initSpecProcessCapabilities() - - // we don't care about order...and this is way faster... 
- removeFunc := func(s []string, i int) []string { - s[i] = s[len(s)-1] - return s[:len(s)-1] - } - - for i, cap := range g.spec.Process.Capabilities.Bounding { - if strings.ToUpper(cap) == cp { - g.spec.Process.Capabilities.Bounding = removeFunc(g.spec.Process.Capabilities.Bounding, i) - } - } - - for i, cap := range g.spec.Process.Capabilities.Effective { - if strings.ToUpper(cap) == cp { - g.spec.Process.Capabilities.Effective = removeFunc(g.spec.Process.Capabilities.Effective, i) - } - } - - for i, cap := range g.spec.Process.Capabilities.Inheritable { - if strings.ToUpper(cap) == cp { - g.spec.Process.Capabilities.Inheritable = removeFunc(g.spec.Process.Capabilities.Inheritable, i) - } - } - - for i, cap := range g.spec.Process.Capabilities.Permitted { - if strings.ToUpper(cap) == cp { - g.spec.Process.Capabilities.Permitted = removeFunc(g.spec.Process.Capabilities.Permitted, i) - } - } - for i, cap := range g.spec.Process.Capabilities.Ambient { if strings.ToUpper(cap) == cp { g.spec.Process.Capabilities.Ambient = removeFunc(g.spec.Process.Capabilities.Ambient, i) } } - return nil + return validate.CapValid(cp, false) +} + +// DropProcessCapabilityBounding drops a process capability from g.spec.Process.Capabilities.Bounding. +func (g *Generator) DropProcessCapabilityBounding(c string) error { + if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil { + return nil + } + + cp := strings.ToUpper(c) + for i, cap := range g.spec.Process.Capabilities.Bounding { + if strings.ToUpper(cap) == cp { + g.spec.Process.Capabilities.Bounding = removeFunc(g.spec.Process.Capabilities.Bounding, i) + } + } + + return validate.CapValid(cp, false) +} + +// DropProcessCapabilityEffective drops a process capability from g.spec.Process.Capabilities.Effective. +func (g *Generator) DropProcessCapabilityEffective(c string) error { + if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil { + return nil + } + + cp := strings.ToUpper(c) + for i, cap := range g.spec.Process.Capabilities.Effective { + if strings.ToUpper(cap) == cp { + g.spec.Process.Capabilities.Effective = removeFunc(g.spec.Process.Capabilities.Effective, i) + } + } + + return validate.CapValid(cp, false) +} + +// DropProcessCapabilityInheritable drops a process capability from g.spec.Process.Capabilities.Inheritable. +func (g *Generator) DropProcessCapabilityInheritable(c string) error { + if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil { + return nil + } + + cp := strings.ToUpper(c) + for i, cap := range g.spec.Process.Capabilities.Inheritable { + if strings.ToUpper(cap) == cp { + g.spec.Process.Capabilities.Inheritable = removeFunc(g.spec.Process.Capabilities.Inheritable, i) + } + } + + return validate.CapValid(cp, false) +} + +// DropProcessCapabilityPermitted drops a process capability from g.spec.Process.Capabilities.Permitted. 
+func (g *Generator) DropProcessCapabilityPermitted(c string) error { + if g.spec == nil || g.spec.Process == nil || g.spec.Process.Capabilities == nil { + return nil + } + + cp := strings.ToUpper(c) + for i, cap := range g.spec.Process.Capabilities.Permitted { + if strings.ToUpper(cap) == cp { + g.spec.Process.Capabilities.Ambient = removeFunc(g.spec.Process.Capabilities.Ambient, i) + } + } + + return validate.CapValid(cp, false) } func mapStrToNamespace(ns string, path string) (rspec.LinuxNamespace, error) { @@ -1180,18 +1317,17 @@ func (g *Generator) AddDevice(device rspec.LinuxDevice) { } // RemoveDevice remove a device from g.spec.Linux.Devices -func (g *Generator) RemoveDevice(path string) error { +func (g *Generator) RemoveDevice(path string) { if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Devices == nil { - return nil + return } for i, device := range g.spec.Linux.Devices { if device.Path == path { g.spec.Linux.Devices = append(g.spec.Linux.Devices[:i], g.spec.Linux.Devices[i+1:]...) - return nil + return } } - return nil } // ClearLinuxDevices clears g.spec.Linux.Devices @@ -1203,6 +1339,39 @@ func (g *Generator) ClearLinuxDevices() { g.spec.Linux.Devices = []rspec.LinuxDevice{} } +// AddLinuxResourcesDevice - add a device into g.spec.Linux.Resources.Devices +func (g *Generator) AddLinuxResourcesDevice(allow bool, devType string, major, minor *int64, access string) { + g.initSpecLinuxResources() + + device := rspec.LinuxDeviceCgroup{ + Allow: allow, + Type: devType, + Access: access, + Major: major, + Minor: minor, + } + g.spec.Linux.Resources.Devices = append(g.spec.Linux.Resources.Devices, device) +} + +// RemoveLinuxResourcesDevice - remove a device from g.spec.Linux.Resources.Devices +func (g *Generator) RemoveLinuxResourcesDevice(allow bool, devType string, major, minor *int64, access string) { + if g.spec == nil || g.spec.Linux == nil || g.spec.Linux.Resources == nil { + return + } + for i, device := range g.spec.Linux.Resources.Devices { + if device.Allow == allow && + (devType == device.Type || (devType != "" && device.Type != "" && devType == device.Type)) && + (access == device.Access || (access != "" && device.Access != "" && access == device.Access)) && + (major == device.Major || (major != nil && device.Major != nil && *major == *device.Major)) && + (minor == device.Minor || (minor != nil && device.Minor != nil && *minor == *device.Minor)) { + + g.spec.Linux.Resources.Devices = append(g.spec.Linux.Resources.Devices[:i], g.spec.Linux.Resources.Devices[i+1:]...) + return + } + } + return +} + // strPtr returns the pointer pointing to the string s. 
func strPtr(s string) *string { return &s } @@ -1254,3 +1423,122 @@ func (g *Generator) AddLinuxReadonlyPaths(path string) { g.initSpecLinux() g.spec.Linux.ReadonlyPaths = append(g.spec.Linux.ReadonlyPaths, path) } + +func addOrReplaceBlockIOThrottleDevice(tmpList []rspec.LinuxThrottleDevice, major int64, minor int64, rate uint64) []rspec.LinuxThrottleDevice { + throttleDevices := tmpList + for i, throttleDevice := range throttleDevices { + if throttleDevice.Major == major && throttleDevice.Minor == minor { + throttleDevices[i].Rate = rate + return throttleDevices + } + } + throttleDevice := new(rspec.LinuxThrottleDevice) + throttleDevice.Major = major + throttleDevice.Minor = minor + throttleDevice.Rate = rate + throttleDevices = append(throttleDevices, *throttleDevice) + + return throttleDevices +} + +func dropBlockIOThrottleDevice(tmpList []rspec.LinuxThrottleDevice, major int64, minor int64) []rspec.LinuxThrottleDevice { + throttleDevices := tmpList + for i, throttleDevice := range throttleDevices { + if throttleDevice.Major == major && throttleDevice.Minor == minor { + throttleDevices = append(throttleDevices[:i], throttleDevices[i+1:]...) + return throttleDevices + } + } + + return throttleDevices +} + +// AddSolarisAnet adds network into g.spec.Solaris.Anet +func (g *Generator) AddSolarisAnet(anet rspec.SolarisAnet) { + g.initSpecSolaris() + g.spec.Solaris.Anet = append(g.spec.Solaris.Anet, anet) +} + +// SetSolarisCappedCPUNcpus sets g.spec.Solaris.CappedCPU.Ncpus +func (g *Generator) SetSolarisCappedCPUNcpus(ncpus string) { + g.initSpecSolarisCappedCPU() + g.spec.Solaris.CappedCPU.Ncpus = ncpus +} + +// SetSolarisCappedMemoryPhysical sets g.spec.Solaris.CappedMemory.Physical +func (g *Generator) SetSolarisCappedMemoryPhysical(physical string) { + g.initSpecSolarisCappedMemory() + g.spec.Solaris.CappedMemory.Physical = physical +} + +// SetSolarisCappedMemorySwap sets g.spec.Solaris.CappedMemory.Swap +func (g *Generator) SetSolarisCappedMemorySwap(swap string) { + g.initSpecSolarisCappedMemory() + g.spec.Solaris.CappedMemory.Swap = swap +} + +// SetSolarisLimitPriv sets g.spec.Solaris.LimitPriv +func (g *Generator) SetSolarisLimitPriv(limitPriv string) { + g.initSpecSolaris() + g.spec.Solaris.LimitPriv = limitPriv +} + +// SetSolarisMaxShmMemory sets g.spec.Solaris.MaxShmMemory +func (g *Generator) SetSolarisMaxShmMemory(memory string) { + g.initSpecSolaris() + g.spec.Solaris.MaxShmMemory = memory +} + +// SetSolarisMilestone sets g.spec.Solaris.Milestone +func (g *Generator) SetSolarisMilestone(milestone string) { + g.initSpecSolaris() + g.spec.Solaris.Milestone = milestone +} + +// SetWindowsHypervUntilityVMPath sets g.spec.Windows.HyperV.UtilityVMPath. +func (g *Generator) SetWindowsHypervUntilityVMPath(path string) { + g.initSpecWindowsHyperV() + g.spec.Windows.HyperV.UtilityVMPath = path +} + +// SetWinodwsIgnoreFlushesDuringBoot sets g.spec.Winodws.IgnoreFlushesDuringBoot. +func (g *Generator) SetWinodwsIgnoreFlushesDuringBoot(ignore bool) { + g.initSpecWindows() + g.spec.Windows.IgnoreFlushesDuringBoot = ignore +} + +// AddWindowsLayerFolders adds layer folders into g.spec.Windows.LayerFolders. +func (g *Generator) AddWindowsLayerFolders(folder string) { + g.initSpecWindows() + g.spec.Windows.LayerFolders = append(g.spec.Windows.LayerFolders, folder) +} + +// SetWindowsNetwork sets g.spec.Windows.Network. 
+func (g *Generator) SetWindowsNetwork(network rspec.WindowsNetwork) { + g.initSpecWindows() + g.spec.Windows.Network = &network +} + +// SetWindowsResourcesCPU sets g.spec.Windows.Resources.CPU. +func (g *Generator) SetWindowsResourcesCPU(cpu rspec.WindowsCPUResources) { + g.initSpecWindowsResources() + g.spec.Windows.Resources.CPU = &cpu +} + +// SetWindowsResourcesMemoryLimit sets g.spec.Windows.Resources.Memory.Limit. +func (g *Generator) SetWindowsResourcesMemoryLimit(limit uint64) { + g.initSpecWindowsResourcesMemory() + g.spec.Windows.Resources.Memory.Limit = &limit +} + +// SetWindowsResourcesStorage sets g.spec.Windows.Resources.Storage. +func (g *Generator) SetWindowsResourcesStorage(storage rspec.WindowsStorageResources) { + g.initSpecWindowsResources() + g.spec.Windows.Resources.Storage = &storage +} + +// SetWinodwsServicing sets g.spec.Winodws.Servicing. +func (g *Generator) SetWinodwsServicing(servicing bool) { + g.initSpecWindows() + g.spec.Windows.Servicing = servicing +} diff --git a/vendor/github.com/opencontainers/runtime-tools/generate/spec.go b/vendor/github.com/opencontainers/runtime-tools/generate/spec.go index 519d2544..d7a6da81 100644 --- a/vendor/github.com/opencontainers/runtime-tools/generate/spec.go +++ b/vendor/github.com/opencontainers/runtime-tools/generate/spec.go @@ -59,6 +59,13 @@ func (g *Generator) initSpecLinux() { } } +func (g *Generator) initSpecLinuxIntelRdt() { + g.initSpecLinux() + if g.spec.Linux.IntelRdt == nil { + g.spec.Linux.IntelRdt = &rspec.LinuxIntelRdt{} + } +} + func (g *Generator) initSpecLinuxSysctl() { g.initSpecLinux() if g.spec.Linux.Sysctl == nil { @@ -80,6 +87,13 @@ func (g *Generator) initSpecLinuxResources() { } } +func (g *Generator) initSpecLinuxResourcesBlockIO() { + g.initSpecLinuxResources() + if g.spec.Linux.Resources.BlockIO == nil { + g.spec.Linux.Resources.BlockIO = &rspec.LinuxBlockIO{} + } +} + func (g *Generator) initSpecLinuxResourcesCPU() { g.initSpecLinuxResources() if g.spec.Linux.Resources.CPU == nil { @@ -107,3 +121,52 @@ func (g *Generator) initSpecLinuxResourcesPids() { g.spec.Linux.Resources.Pids = &rspec.LinuxPids{} } } + +func (g *Generator) initSpecSolaris() { + g.initSpec() + if g.spec.Solaris == nil { + g.spec.Solaris = &rspec.Solaris{} + } +} + +func (g *Generator) initSpecSolarisCappedCPU() { + g.initSpecSolaris() + if g.spec.Solaris.CappedCPU == nil { + g.spec.Solaris.CappedCPU = &rspec.SolarisCappedCPU{} + } +} + +func (g *Generator) initSpecSolarisCappedMemory() { + g.initSpecSolaris() + if g.spec.Solaris.CappedMemory == nil { + g.spec.Solaris.CappedMemory = &rspec.SolarisCappedMemory{} + } +} + +func (g *Generator) initSpecWindows() { + g.initSpec() + if g.spec.Windows == nil { + g.spec.Windows = &rspec.Windows{} + } +} + +func (g *Generator) initSpecWindowsHyperV() { + g.initSpecWindows() + if g.spec.Windows.HyperV == nil { + g.spec.Windows.HyperV = &rspec.WindowsHyperV{} + } +} + +func (g *Generator) initSpecWindowsResources() { + g.initSpecWindows() + if g.spec.Windows.Resources == nil { + g.spec.Windows.Resources = &rspec.WindowsResources{} + } +} + +func (g *Generator) initSpecWindowsResourcesMemory() { + g.initSpecWindowsResources() + if g.spec.Windows.Resources.Memory == nil { + g.spec.Windows.Resources.Memory = &rspec.WindowsMemoryResources{} + } +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go b/vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go new file mode 100644 index 00000000..dbe32af3 --- /dev/null +++ 
b/vendor/github.com/opencontainers/runtime-tools/specerror/bundle.go @@ -0,0 +1,29 @@ +package specerror + +import ( + "fmt" + + rfc2119 "github.com/opencontainers/runtime-tools/error" +) + +// define error codes +const ( + // ConfigInRootBundleDir represents "This REQUIRED file MUST reside in the root of the bundle directory" + ConfigInRootBundleDir Code = 0xa001 + iota + // ConfigConstName represents "This REQUIRED file MUST be named `config.json`." + ConfigConstName + // ArtifactsInSingleDir represents "When supplied, while these artifacts MUST all be present in a single directory on the local filesystem, that directory itself is not part of the bundle." + ArtifactsInSingleDir +) + +var ( + containerFormatRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "bundle.md#container-format"), nil + } +) + +func init() { + register(ConfigInRootBundleDir, rfc2119.Must, containerFormatRef) + register(ConfigConstName, rfc2119.Must, containerFormatRef) + register(ArtifactsInSingleDir, rfc2119.Must, containerFormatRef) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go b/vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go new file mode 100644 index 00000000..aa2a284c --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/specerror/config-linux.go @@ -0,0 +1,134 @@ +package specerror + +import ( + "fmt" + + rfc2119 "github.com/opencontainers/runtime-tools/error" +) + +// define error codes +const ( + // DefaultFilesystems represents "The following filesystems SHOULD be made available in each container's filesystem:" + DefaultFilesystems Code = 0xc001 + iota + // NSPathAbs represents "This value MUST be an absolute path in the runtime mount namespace." + NSPathAbs + // NSProcInPath represents "The runtime MUST place the container process in the namespace associated with that `path`." + NSProcInPath + // NSPathMatchTypeError represents "The runtime MUST generate an error if `path` is not associated with a namespace of type `type`." + NSPathMatchTypeError + // NSNewNSWithoutPath represents "If `path` is not specified, the runtime MUST create a new container namespace of type `type`." + NSNewNSWithoutPath + // NSInheritWithoutType represents "If a namespace type is not specified in the `namespaces` array, the container MUST inherit the runtime namespace of that type." + NSInheritWithoutType + // NSErrorOnDup represents "If a `namespaces` field contains duplicated namespaces with same `type`, the runtime MUST generate an error." + NSErrorOnDup + // UserNSMapOwnershipRO represents "The runtime SHOULD NOT modify the ownership of referenced filesystems to realize the mapping." + UserNSMapOwnershipRO + // DevicesAvailable represents "devices (array of objects, OPTIONAL) lists devices that MUST be available in the container." + DevicesAvailable + // DevicesFileNotMatch represents "If a file already exists at `path` that does not match the requested device, the runtime MUST generate an error." + DevicesFileNotMatch + // DevicesMajMinRequired represents "`major, minor` (int64, REQUIRED unless `type` is `p`) - major, minor numbers for the device." + DevicesMajMinRequired + // DevicesErrorOnDup represents "The same `type`, `major` and `minor` SHOULD NOT be used for multiple devices." + DevicesErrorOnDup + // DefaultDevices represents "In addition to any devices configured with this setting, the runtime MUST also supply default devices." 
+ DefaultDevices + // CgroupsPathAbsOrRel represents "The value of `cgroupsPath` MUST be either an absolute path or a relative path." + CgroupsPathAbsOrRel + // CgroupsAbsPathRelToMount represents "In the case of an absolute path (starting with `/`), the runtime MUST take the path to be relative to the cgroups mount point." + CgroupsAbsPathRelToMount + // CgroupsPathAttach represents "If the value is specified, the runtime MUST consistently attach to the same place in the cgroups hierarchy given the same value of `cgroupsPath`." + CgroupsPathAttach + // CgroupsPathError represents "Runtimes MAY consider certain `cgroupsPath` values to be invalid, and MUST generate an error if this is the case." + CgroupsPathError + // DevicesApplyInOrder represents "The runtime MUST apply entries in the listed order." + DevicesApplyInOrder + // BlkIOWeightOrLeafWeightExist represents "You MUST specify at least one of `weight` or `leafWeight` in a given entry, and MAY specify both." + BlkIOWeightOrLeafWeightExist + // IntelRdtPIDWrite represents "If `intelRdt` is set, the runtime MUST write the container process ID to the `/tasks` file in a mounted `resctrl` pseudo-filesystem, using the container ID from `start` and creating the `container-id` directory if necessary." + IntelRdtPIDWrite + // IntelRdtNoMountedResctrlError represents "If no mounted `resctrl` pseudo-filesystem is available in the runtime mount namespace, the runtime MUST generate an error." + IntelRdtNoMountedResctrlError + // NotManipResctrlWithoutIntelRdt represents "If `intelRdt` is not set, the runtime MUST NOT manipulate any `resctrl` pseudo-filesystems." + NotManipResctrlWithoutIntelRdt + // IntelRdtL3CacheSchemaWrite represents "If `l3CacheSchema` is set, runtimes MUST write the value to the `schemata` file in the `` directory discussed in `intelRdt`." + IntelRdtL3CacheSchemaWrite + // IntelRdtL3CacheSchemaNotWrite represents "If `l3CacheSchema` is not set, runtimes MUST NOT write to `schemata` files in any `resctrl` pseudo-filesystems." + IntelRdtL3CacheSchemaNotWrite + // SeccSyscallsNamesRequired represents "`names` MUST contain at least one entry." + SeccSyscallsNamesRequired + // MaskedPathsAbs represents "maskedPaths (array of strings, OPTIONAL) will mask over the provided paths inside the container so that they cannot be read. The values MUST be absolute paths in the container namespace." + MaskedPathsAbs + // ReadonlyPathsAbs represents "readonlyPaths (array of strings, OPTIONAL) will set the provided paths as readonly inside the container. The values MUST be absolute paths in the container namespace." 
+ ReadonlyPathsAbs +) + +var ( + defaultFilesystemsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-filesystems"), nil + } + namespacesRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#namespaces"), nil + } + userNamespaceMappingsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#user-namespace-mappings"), nil + } + devicesRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#devices"), nil + } + defaultDevicesRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-devices"), nil + } + cgroupsPathRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#cgroups-path"), nil + } + deviceWhitelistRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#device-whitelist"), nil + } + blockIoRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#block-io"), nil + } + intelrdtRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#intelrdt"), nil + } + seccompRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#seccomp"), nil + } + maskedPathsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#masked-paths"), nil + } + readonlyPathsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-linux.md#readonly-paths"), nil + } +) + +func init() { + register(DefaultFilesystems, rfc2119.Should, defaultFilesystemsRef) + register(NSPathAbs, rfc2119.Must, namespacesRef) + register(NSProcInPath, rfc2119.Must, namespacesRef) + register(NSPathMatchTypeError, rfc2119.Must, namespacesRef) + register(NSNewNSWithoutPath, rfc2119.Must, namespacesRef) + register(NSInheritWithoutType, rfc2119.Must, namespacesRef) + register(NSErrorOnDup, rfc2119.Must, namespacesRef) + register(UserNSMapOwnershipRO, rfc2119.Should, userNamespaceMappingsRef) + register(DevicesAvailable, rfc2119.Must, devicesRef) + register(DevicesFileNotMatch, rfc2119.Must, devicesRef) + register(DevicesMajMinRequired, rfc2119.Required, devicesRef) + register(DevicesErrorOnDup, rfc2119.Should, devicesRef) + register(DefaultDevices, rfc2119.Must, defaultDevicesRef) + register(CgroupsPathAbsOrRel, rfc2119.Must, cgroupsPathRef) + register(CgroupsAbsPathRelToMount, rfc2119.Must, cgroupsPathRef) + register(CgroupsPathAttach, rfc2119.Must, cgroupsPathRef) + register(CgroupsPathError, rfc2119.Must, cgroupsPathRef) + register(DevicesApplyInOrder, rfc2119.Must, deviceWhitelistRef) + register(BlkIOWeightOrLeafWeightExist, rfc2119.Must, blockIoRef) + register(IntelRdtPIDWrite, rfc2119.Must, intelrdtRef) + register(IntelRdtNoMountedResctrlError, rfc2119.Must, intelrdtRef) + register(NotManipResctrlWithoutIntelRdt, rfc2119.Must, intelrdtRef) + register(IntelRdtL3CacheSchemaWrite, rfc2119.Must, intelrdtRef) + register(IntelRdtL3CacheSchemaNotWrite, rfc2119.Must, intelrdtRef) + register(SeccSyscallsNamesRequired, 
rfc2119.Must, seccompRef) + register(MaskedPathsAbs, rfc2119.Must, maskedPathsRef) + register(ReadonlyPathsAbs, rfc2119.Must, readonlyPathsRef) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go b/vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go new file mode 100644 index 00000000..c8134867 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/specerror/config-windows.go @@ -0,0 +1,32 @@ +package specerror + +import ( + "fmt" + + rfc2119 "github.com/opencontainers/runtime-tools/error" +) + +// define error codes +const ( + // WindowsLayerFoldersRequired represents "`layerFolders` MUST contain at least one entry." + WindowsLayerFoldersRequired Code = 0xd001 + iota + // WindowsHyperVPresent represents "If present, the container MUST be run with Hyper-V isolation." + WindowsHyperVPresent + // WindowsHyperVOmit represents "If omitted, the container MUST be run as a Windows Server container." + WindowsHyperVOmit +) + +var ( + layerfoldersRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-windows.md#layerfolders"), nil + } + hypervRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config-windows.md#hyperv"), nil + } +) + +func init() { + register(WindowsLayerFoldersRequired, rfc2119.Must, layerfoldersRef) + register(WindowsHyperVPresent, rfc2119.Must, hypervRef) + register(WindowsHyperVOmit, rfc2119.Must, hypervRef) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/config.go b/vendor/github.com/opencontainers/runtime-tools/specerror/config.go new file mode 100644 index 00000000..8a73f47f --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/specerror/config.go @@ -0,0 +1,188 @@ +package specerror + +import ( + "fmt" + + rfc2119 "github.com/opencontainers/runtime-tools/error" +) + +// define error codes +const ( + // SpecVersionInSemVer represents "`ociVersion` (string, REQUIRED) MUST be in SemVer v2.0.0 format and specifies the version of the Open Container Initiative Runtime Specification with which the bundle complies." + SpecVersionInSemVer Code = 0xb001 + iota + // RootOnWindowsRequired represents "On Windows, for Windows Server Containers, this field is REQUIRED." + RootOnWindowsRequired + // RootOnHyperVNotSet represents "For Hyper-V Containers, this field MUST NOT be set." + RootOnHyperVNotSet + // RootOnNonHyperVRequired represents "On all other platforms, this field is REQUIRED." + RootOnNonHyperVRequired + // RootPathOnWindowsGUID represents "On Windows, `path` MUST be a volume GUID path." + RootPathOnWindowsGUID + // RootPathOnPosixConvention represents "The value SHOULD be the conventional `rootfs`." + RootPathOnPosixConvention + // RootPathExist represents "A directory MUST exist at the path declared by the field." + RootPathExist + // RootReadonlyImplement represents "`readonly` (bool, OPTIONAL) If true then the root filesystem MUST be read-only inside the container, defaults to false." + RootReadonlyImplement + // RootReadonlyOnWindowsFalse represents "* On Windows, this field MUST be omitted or false." + RootReadonlyOnWindowsFalse + // MountsInOrder represents "The runtime MUST mount entries in the listed order." + MountsInOrder + // MountsDestAbs represents "Destination of mount point: path inside container. This value MUST be an absolute path." 
+ MountsDestAbs + // MountsDestOnWindowsNotNested represents "Windows: one mount destination MUST NOT be nested within another mount (e.g., c:\\foo and c:\\foo\\bar)." + MountsDestOnWindowsNotNested + // MountsOptionsOnWindowsROSupport represents "Windows: runtimes MUST support `ro`, mounting the filesystem read-only when `ro` is given." + MountsOptionsOnWindowsROSupport + // ProcRequiredAtStart represents "This property is REQUIRED when `start` is called." + ProcRequiredAtStart + // ProcConsoleSizeIgnore represents "Runtimes MUST ignore `consoleSize` if `terminal` is `false` or unset." + ProcConsoleSizeIgnore + // ProcCwdAbs represents "cwd (string, REQUIRED) is the working directory that will be set for the executable. This value MUST be an absolute path." + ProcCwdAbs + // ProcArgsOneEntryRequired represents "This specification extends the IEEE standard in that at least one entry is REQUIRED, and that entry is used with the same semantics as `execvp`'s *file*." + ProcArgsOneEntryRequired + // PosixProcRlimitsTypeGenError represents "The runtime MUST generate an error for any values which cannot be mapped to a relevant kernel interface." + PosixProcRlimitsTypeGenError + // PosixProcRlimitsTypeGet represents "For each entry in `rlimits`, a `getrlimit(3)` on `type` MUST succeed." + PosixProcRlimitsTypeGet + // PosixProcRlimitsTypeValueError represents "valid values are defined in the ... man page" + PosixProcRlimitsTypeValueError + // PosixProcRlimitsSoftMatchCur represents "`rlim.rlim_cur` MUST match the configured value." + PosixProcRlimitsSoftMatchCur + // PosixProcRlimitsHardMatchMax represents "`rlim.rlim_max` MUST match the configured value." + PosixProcRlimitsHardMatchMax + // PosixProcRlimitsErrorOnDup represents "If `rlimits` contains duplicated entries with same `type`, the runtime MUST generate an error." + PosixProcRlimitsErrorOnDup + // LinuxProcCapError represents "Any value which cannot be mapped to a relevant kernel interface MUST cause an error." + LinuxProcCapError + // LinuxProcOomScoreAdjSet represents "If `oomScoreAdj` is set, the runtime MUST set `oom_score_adj` to the given value." + LinuxProcOomScoreAdjSet + // LinuxProcOomScoreAdjNotSet represents "If `oomScoreAdj` is not set, the runtime MUST NOT change the value of `oom_score_adj`." + LinuxProcOomScoreAdjNotSet + // PlatformSpecConfOnWindowsSet represents "This MUST be set if the target platform of this spec is `windows`." + PlatformSpecConfOnWindowsSet + // PosixHooksPathAbs represents "This specification extends the IEEE standard in that `path` MUST be absolute." + PosixHooksPathAbs + // PosixHooksTimeoutPositive represents "If set, `timeout` MUST be greater than zero." + PosixHooksTimeoutPositive + // PosixHooksCalledInOrder represents "Hooks MUST be called in the listed order." + PosixHooksCalledInOrder + // PosixHooksStateToStdin represents "The state of the container MUST be passed to hooks over stdin so that they may do work appropriate to the current state of the container." + PosixHooksStateToStdin + // PrestartTiming represents "The pre-start hooks MUST be called after the `start` operation is called but before the user-specified program command is executed." + PrestartTiming + // PoststartTiming represents "The post-start hooks MUST be called after the user-specified process is executed but before the `start` operation returns." + PoststartTiming + // PoststopTiming represents "The post-stop hooks MUST be called after the container is deleted but before the `delete` operation returns." 
+ PoststopTiming + // AnnotationsKeyValueMap represents "Annotations MUST be a key-value map." + AnnotationsKeyValueMap + // AnnotationsKeyString represents "Keys MUST be strings." + AnnotationsKeyString + // AnnotationsKeyRequired represents "Keys MUST NOT be an empty string." + AnnotationsKeyRequired + // AnnotationsKeyReversedDomain represents "Keys SHOULD be named using a reverse domain notation - e.g. `com.example.myKey`." + AnnotationsKeyReversedDomain + // AnnotationsKeyReservedNS represents "Keys using the `org.opencontainers` namespace are reserved and MUST NOT be used by subsequent specifications." + AnnotationsKeyReservedNS + // AnnotationsKeyIgnoreUnknown represents "Implementations that are reading/processing this configuration file MUST NOT generate an error if they encounter an unknown annotation key." + AnnotationsKeyIgnoreUnknown + // AnnotationsValueString represents "Values MUST be strings." + AnnotationsValueString + // ExtensibilityIgnoreUnknownProp represents "Runtimes that are reading or processing this configuration file MUST NOT generate an error if they encounter an unknown property." + ExtensibilityIgnoreUnknownProp + // ValidValues represents "Runtimes that are reading or processing this configuration file MUST generate an error when invalid or unsupported values are encountered." + ValidValues +) + +var ( + specificationVersionRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#specification-version"), nil + } + rootRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#root"), nil + } + mountsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#mounts"), nil + } + processRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#process"), nil + } + posixProcessRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#posix-process"), nil + } + linuxProcessRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#linux-process"), nil + } + platformSpecificConfigurationRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#platform-specific-configuration"), nil + } + posixPlatformHooksRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#posix-platform-hooks"), nil + } + prestartRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#prestart"), nil + } + poststartRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#poststart"), nil + } + poststopRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#poststop"), nil + } + annotationsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#annotations"), nil + } + extensibilityRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#extensibility"), nil + } + validValuesRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "config.md#valid-values"), 
nil + } +) + +func init() { + register(SpecVersionInSemVer, rfc2119.Must, specificationVersionRef) + register(RootOnWindowsRequired, rfc2119.Required, rootRef) + register(RootOnHyperVNotSet, rfc2119.Must, rootRef) + register(RootOnNonHyperVRequired, rfc2119.Required, rootRef) + register(RootPathOnWindowsGUID, rfc2119.Must, rootRef) + register(RootPathOnPosixConvention, rfc2119.Should, rootRef) + register(RootPathExist, rfc2119.Must, rootRef) + register(RootReadonlyImplement, rfc2119.Must, rootRef) + register(RootReadonlyOnWindowsFalse, rfc2119.Must, rootRef) + register(MountsInOrder, rfc2119.Must, mountsRef) + register(MountsDestAbs, rfc2119.Must, mountsRef) + register(MountsDestOnWindowsNotNested, rfc2119.Must, mountsRef) + register(MountsOptionsOnWindowsROSupport, rfc2119.Must, mountsRef) + register(ProcRequiredAtStart, rfc2119.Required, processRef) + register(ProcConsoleSizeIgnore, rfc2119.Must, processRef) + register(ProcCwdAbs, rfc2119.Must, processRef) + register(ProcArgsOneEntryRequired, rfc2119.Required, processRef) + register(PosixProcRlimitsTypeGenError, rfc2119.Must, posixProcessRef) + register(PosixProcRlimitsTypeGet, rfc2119.Must, posixProcessRef) + register(PosixProcRlimitsTypeValueError, rfc2119.Should, posixProcessRef) + register(PosixProcRlimitsSoftMatchCur, rfc2119.Must, posixProcessRef) + register(PosixProcRlimitsHardMatchMax, rfc2119.Must, posixProcessRef) + register(PosixProcRlimitsErrorOnDup, rfc2119.Must, posixProcessRef) + register(LinuxProcCapError, rfc2119.Must, linuxProcessRef) + register(LinuxProcOomScoreAdjSet, rfc2119.Must, linuxProcessRef) + register(LinuxProcOomScoreAdjNotSet, rfc2119.Must, linuxProcessRef) + register(PlatformSpecConfOnWindowsSet, rfc2119.Must, platformSpecificConfigurationRef) + register(PosixHooksPathAbs, rfc2119.Must, posixPlatformHooksRef) + register(PosixHooksTimeoutPositive, rfc2119.Must, posixPlatformHooksRef) + register(PosixHooksCalledInOrder, rfc2119.Must, posixPlatformHooksRef) + register(PosixHooksStateToStdin, rfc2119.Must, posixPlatformHooksRef) + register(PrestartTiming, rfc2119.Must, prestartRef) + register(PoststartTiming, rfc2119.Must, poststartRef) + register(PoststopTiming, rfc2119.Must, poststopRef) + register(AnnotationsKeyValueMap, rfc2119.Must, annotationsRef) + register(AnnotationsKeyString, rfc2119.Must, annotationsRef) + register(AnnotationsKeyRequired, rfc2119.Must, annotationsRef) + register(AnnotationsKeyReversedDomain, rfc2119.Should, annotationsRef) + register(AnnotationsKeyReservedNS, rfc2119.Must, annotationsRef) + register(AnnotationsKeyIgnoreUnknown, rfc2119.Must, annotationsRef) + register(AnnotationsValueString, rfc2119.Must, annotationsRef) + register(ExtensibilityIgnoreUnknownProp, rfc2119.Must, extensibilityRef) + register(ValidValues, rfc2119.Must, validValuesRef) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/error.go b/vendor/github.com/opencontainers/runtime-tools/specerror/error.go index c75bb6b1..0300f7df 100644 --- a/vendor/github.com/opencontainers/runtime-tools/specerror/error.go +++ b/vendor/github.com/opencontainers/runtime-tools/specerror/error.go @@ -13,46 +13,13 @@ const referenceTemplate = "https://github.com/opencontainers/runtime-spec/blob/v // Code represents the spec violation, enumerating both // configuration violations and runtime violations. 
-type Code int +type Code int64 const ( // NonError represents that an input is not an error - NonError Code = iota + NonError Code = 0x1a001 + iota // NonRFCError represents that an error is not a rfc2119 error NonRFCError - - // ConfigFileExistence represents the error code of 'config.json' existence test - ConfigFileExistence - // ArtifactsInSingleDir represents the error code of artifacts place test - ArtifactsInSingleDir - - // SpecVersion represents the error code of specfication version test - SpecVersion - - // RootOnNonHyperV represents the error code of root setting test on non hyper-v containers - RootOnNonHyperV - // RootOnHyperV represents the error code of root setting test on hyper-v containers - RootOnHyperV - // PathFormatOnWindows represents the error code of the path format test on Window - PathFormatOnWindows - // PathName represents the error code of the path name test - PathName - // PathExistence represents the error code of the path existence test - PathExistence - // ReadonlyFilesystem represents the error code of readonly test - ReadonlyFilesystem - // ReadonlyOnWindows represents the error code of readonly setting test on Windows - ReadonlyOnWindows - - // DefaultFilesystems represents the error code of default filesystems test - DefaultFilesystems - - // CreateWithID represents the error code of 'create' lifecyle test with 'id' provided - CreateWithID - // CreateWithUniqueID represents the error code of 'create' lifecyle test with unique 'id' provided - CreateWithUniqueID - // CreateNewContainer represents the error code 'create' lifecyle test that creates new container - CreateNewContainer ) type errorTemplate struct { @@ -69,52 +36,24 @@ type Error struct { Code Code } -var ( - containerFormatRef = func(version string) (reference string, err error) { - return fmt.Sprintf(referenceTemplate, version, "bundle.md#container-format"), nil - } - specVersionRef = func(version string) (reference string, err error) { - return fmt.Sprintf(referenceTemplate, version, "config.md#specification-version"), nil - } - rootRef = func(version string) (reference string, err error) { - return fmt.Sprintf(referenceTemplate, version, "config.md#root"), nil - } - defaultFSRef = func(version string) (reference string, err error) { - return fmt.Sprintf(referenceTemplate, version, "config-linux.md#default-filesystems"), nil - } - runtimeCreateRef = func(version string) (reference string, err error) { - return fmt.Sprintf(referenceTemplate, version, "runtime.md#create"), nil - } -) +// LevelErrors represents Errors filtered into fatal and warnings. +type LevelErrors struct { + // Warnings holds Errors that were below a compliance-level threshold. + Warnings []*Error -var ociErrors = map[Code]errorTemplate{ - // Bundle.md - // Container Format - ConfigFileExistence: {Level: rfc2119.Must, Reference: containerFormatRef}, - ArtifactsInSingleDir: {Level: rfc2119.Must, Reference: containerFormatRef}, + // Error holds errors that were at or above a compliance-level + // threshold, as well as errors that are not Errors. 
+ Error *multierror.Error +} - // Config.md - // Specification Version - SpecVersion: {Level: rfc2119.Must, Reference: specVersionRef}, - // Root - RootOnNonHyperV: {Level: rfc2119.Required, Reference: rootRef}, - RootOnHyperV: {Level: rfc2119.Must, Reference: rootRef}, - // TODO: add tests for 'PathFormatOnWindows' - PathFormatOnWindows: {Level: rfc2119.Must, Reference: rootRef}, - PathName: {Level: rfc2119.Should, Reference: rootRef}, - PathExistence: {Level: rfc2119.Must, Reference: rootRef}, - ReadonlyFilesystem: {Level: rfc2119.Must, Reference: rootRef}, - ReadonlyOnWindows: {Level: rfc2119.Must, Reference: rootRef}, +var ociErrors = map[Code]errorTemplate{} - // Config-Linux.md - // Default Filesystems - DefaultFilesystems: {Level: rfc2119.Should, Reference: defaultFSRef}, +func register(code Code, level rfc2119.Level, ref func(versiong string) (string, error)) { + if _, ok := ociErrors[code]; ok { + panic(fmt.Sprintf("should not regist a same code twice: %v", code)) + } - // Runtime.md - // Create - CreateWithID: {Level: rfc2119.Must, Reference: runtimeCreateRef}, - CreateWithUniqueID: {Level: rfc2119.Must, Reference: runtimeCreateRef}, - CreateNewContainer: {Level: rfc2119.Must, Reference: runtimeCreateRef}, + ociErrors[code] = errorTemplate{Level: level, Reference: ref} } // Error returns the error message with specification reference. @@ -168,3 +107,23 @@ func FindError(err error, code Code) Code { } return NonRFCError } + +// SplitLevel removes RFC 2119 errors with a level less than 'level' +// from the source error. If the source error is not a multierror, it +// is returned unchanged. +func SplitLevel(errIn error, level rfc2119.Level) (levelErrors LevelErrors, errOut error) { + merr, ok := errIn.(*multierror.Error) + if !ok { + return levelErrors, errIn + } + for _, err := range merr.Errors { + e, ok := err.(*Error) + if ok && e.Err.Level < level { + fmt.Println(e) + levelErrors.Warnings = append(levelErrors.Warnings, e) + continue + } + levelErrors.Error = multierror.Append(levelErrors.Error, err) + } + return levelErrors, nil +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go new file mode 100644 index 00000000..8eb259ba --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime-linux.go @@ -0,0 +1,23 @@ +package specerror + +import ( + "fmt" + + rfc2119 "github.com/opencontainers/runtime-tools/error" +) + +// define error codes +const ( + // DefaultRuntimeLinuxSymlinks represents "While creating the container (step 2 in the lifecycle), runtimes MUST create default symlinks if the source file exists after processing `mounts`." 
+ DefaultRuntimeLinuxSymlinks Code = 0xf001 + iota +) + +var ( + devSymbolicLinksRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime-linux.md#dev-symbolic-links"), nil + } +) + +func init() { + register(DefaultRuntimeLinuxSymlinks, rfc2119.Must, devSymbolicLinksRef) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go new file mode 100644 index 00000000..0144f669 --- /dev/null +++ b/vendor/github.com/opencontainers/runtime-tools/specerror/runtime.go @@ -0,0 +1,179 @@ +package specerror + +import ( + "fmt" + + rfc2119 "github.com/opencontainers/runtime-tools/error" +) + +// define error codes +const ( + // EntityOperSameContainer represents "The entity using a runtime to create a container MUST be able to use the operations defined in this specification against that same container." + EntityOperSameContainer Code = 0xe001 + iota + // StateIDUniq represents "`id` (string, REQUIRED) is the container's ID. This MUST be unique across all containers on this host." + StateIDUniq + // StateNewStatus represents "Additional values MAY be defined by the runtime, however, they MUST be used to represent new runtime states not defined above." + StateNewStatus + // DefaultStateJSONPattern represents "When serialized in JSON, the format MUST adhere to the default pattern." + DefaultStateJSONPattern + // EnvCreateImplement represents "The container's runtime environment MUST be created according to the configuration in `config.json`." + EnvCreateImplement + // EnvCreateError represents "If the runtime is unable to create the environment specified in the `config.json`, it MUST generate an error." + EnvCreateError + // ProcNotRunAtResRequest represents "While the resources requested in the `config.json` MUST be created, the user-specified program (from `process`) MUST NOT be run at this time." + ProcNotRunAtResRequest + // ConfigUpdatesWithoutAffect represents "Any updates to `config.json` after this step MUST NOT affect the container." + ConfigUpdatesWithoutAffect + // PrestartHooksInvoke represents "The prestart hooks MUST be invoked by the runtime." + PrestartHooksInvoke + // PrestartHookFailGenError represents "If any prestart hook fails, the runtime MUST generate an error, stop the container, and continue the lifecycle at step 9." + PrestartHookFailGenError + // ProcImplement represents "The runtime MUST run the user-specified program, as specified by `process`." + ProcImplement + // PoststartHooksInvoke represents "The poststart hooks MUST be invoked by the runtime." + PoststartHooksInvoke + // PoststartHookFailGenWarn represents "If any poststart hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded." + PoststartHookFailGenWarn + // UndoCreateSteps represents "The container MUST be destroyed by undoing the steps performed during create phase (step 2)." + UndoCreateSteps + // PoststopHooksInvoke represents "The poststop hooks MUST be invoked by the runtime." + PoststopHooksInvoke + // PoststopHookFailGenWarn represents "If any poststop hook fails, the runtime MUST log a warning, but the remaining hooks and lifecycle continue as if the hook had succeeded." 
+ PoststopHookFailGenWarn + // ErrorsLeaveStateUnchange represents "Unless otherwise stated, generating an error MUST leave the state of the environment as if the operation were never attempted - modulo any possible trivial ancillary changes such as logging." + ErrorsLeaveStateUnchange + // WarnsLeaveFlowUnchange represents "Unless otherwise stated, logging a warning does not change the flow of the operation; it MUST continue as if the warning had not been logged." + WarnsLeaveFlowUnchange + // DefaultOperations represents "Unless otherwise stated, runtimes MUST support the default operations." + DefaultOperations + // QueryWithoutIDGenError represents "This operation MUST generate an error if it is not provided the ID of a container." + QueryWithoutIDGenError + // QueryNonExistGenError represents "Attempting to query a container that does not exist MUST generate an error." + QueryNonExistGenError + // QueryStateImplement represents "This operation MUST return the state of a container as specified in the State section." + QueryStateImplement + // CreateWithBundlePathAndID represents "This operation MUST generate an error if it is not provided a path to the bundle and the container ID to associate with the container." + CreateWithBundlePathAndID + // CreateWithUniqueID represents "If the ID provided is not unique across all containers within the scope of the runtime, or is not valid in any other way, the implementation MUST generate an error and a new container MUST NOT be created." + CreateWithUniqueID + // CreateNewContainer represents "This operation MUST create a new container." + CreateNewContainer + // PropsApplyExceptProcOnCreate represents "All of the properties configured in `config.json` except for `process` MUST be applied." + PropsApplyExceptProcOnCreate + // ProcArgsApplyUntilStart represents `process.args` MUST NOT be applied until triggered by the `start` operation." + ProcArgsApplyUntilStart + // PropApplyFailGenError represents "If the runtime cannot apply a property as specified in the configuration, it MUST generate an error." + PropApplyFailGenError + // PropApplyFailNotCreate represents "If the runtime cannot apply a property as specified in the configuration, a new container MUST NOT be created." + PropApplyFailNotCreate + // StartWithoutIDGenError represents "`start` operation MUST generate an error if it is not provided the container ID." + StartWithoutIDGenError + // StartNonCreateHaveNoEffect represents "Attempting to `start` a container that is not `created` MUST have no effect on the container." + StartNonCreateHaveNoEffect + // StartNonCreateGenError represents "Attempting to `start` a container that is not `created` MUST generate an error." + StartNonCreateGenError + // StartProcImplement represents "`start` operation MUST run the user-specified program as specified by `process`." + StartProcImplement + // StartWithProcUnsetGenError represents "`start` operation MUST generate an error if `process` was not set." + StartWithProcUnsetGenError + // KillWithoutIDGenError represents "`kill` operation MUST generate an error if it is not provided the container ID." + KillWithoutIDGenError + // KillNonCreateRunHaveNoEffect represents "Attempting to send a signal to a container that is neither `created` nor `running` MUST have no effect on the container." + KillNonCreateRunHaveNoEffect + // KillNonCreateRunGenError represents "Attempting to send a signal to a container that is neither `created` nor `running` MUST generate an error." 
+ KillNonCreateRunGenError + // KillSignalImplement represents "`kill` operation MUST send the specified signal to the container process." + KillSignalImplement + // DeleteWithoutIDGenError represents "`delete` operation MUST generate an error if it is not provided the container ID." + DeleteWithoutIDGenError + // DeleteNonStopHaveNoEffect represents "Attempting to `delete` a container that is not `stopped` MUST have no effect on the container." + DeleteNonStopHaveNoEffect + // DeleteNonStopGenError represents "Attempting to `delete` a container that is not `stopped` MUST generate an error." + DeleteNonStopGenError + // DeleteResImplement represents "Deleting a container MUST delete the resources that were created during the `create` step." + DeleteResImplement + // DeleteOnlyCreatedRes represents "Note that resources associated with the container, but not created by this container, MUST NOT be deleted." + DeleteOnlyCreatedRes +) + +var ( + scopeOfAContainerRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#scope-of-a-container"), nil + } + stateRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#state"), nil + } + lifecycleRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#lifecycle"), nil + } + errorsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#errors"), nil + } + warningsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#warnings"), nil + } + operationsRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#operations"), nil + } + queryStateRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#query-state"), nil + } + createRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#create"), nil + } + startRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#start"), nil + } + killRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#kill"), nil + } + deleteRef = func(version string) (reference string, err error) { + return fmt.Sprintf(referenceTemplate, version, "runtime.md#delete"), nil + } +) + +func init() { + register(EntityOperSameContainer, rfc2119.Must, scopeOfAContainerRef) + register(StateIDUniq, rfc2119.Must, stateRef) + register(StateNewStatus, rfc2119.Must, stateRef) + register(DefaultStateJSONPattern, rfc2119.Must, stateRef) + register(EnvCreateImplement, rfc2119.Must, lifecycleRef) + register(EnvCreateError, rfc2119.Must, lifecycleRef) + register(ProcNotRunAtResRequest, rfc2119.Must, lifecycleRef) + register(ConfigUpdatesWithoutAffect, rfc2119.Must, lifecycleRef) + register(PrestartHooksInvoke, rfc2119.Must, lifecycleRef) + register(PrestartHookFailGenError, rfc2119.Must, lifecycleRef) + register(ProcImplement, rfc2119.Must, lifecycleRef) + register(PoststartHooksInvoke, rfc2119.Must, lifecycleRef) + register(PoststartHookFailGenWarn, rfc2119.Must, lifecycleRef) + register(UndoCreateSteps, rfc2119.Must, lifecycleRef) + register(PoststopHooksInvoke, rfc2119.Must, lifecycleRef) + register(PoststopHookFailGenWarn, 
rfc2119.Must, lifecycleRef) + register(ErrorsLeaveStateUnchange, rfc2119.Must, errorsRef) + register(WarnsLeaveFlowUnchange, rfc2119.Must, warningsRef) + register(DefaultOperations, rfc2119.Must, operationsRef) + register(QueryWithoutIDGenError, rfc2119.Must, queryStateRef) + register(QueryNonExistGenError, rfc2119.Must, queryStateRef) + register(QueryStateImplement, rfc2119.Must, queryStateRef) + register(CreateWithBundlePathAndID, rfc2119.Must, createRef) + register(CreateWithUniqueID, rfc2119.Must, createRef) + register(CreateNewContainer, rfc2119.Must, createRef) + register(PropsApplyExceptProcOnCreate, rfc2119.Must, createRef) + register(ProcArgsApplyUntilStart, rfc2119.Must, createRef) + register(PropApplyFailGenError, rfc2119.Must, createRef) + register(PropApplyFailNotCreate, rfc2119.Must, createRef) + register(StartWithoutIDGenError, rfc2119.Must, startRef) + register(StartNonCreateHaveNoEffect, rfc2119.Must, startRef) + register(StartNonCreateGenError, rfc2119.Must, startRef) + register(StartProcImplement, rfc2119.Must, startRef) + register(StartWithProcUnsetGenError, rfc2119.Must, startRef) + register(KillWithoutIDGenError, rfc2119.Must, killRef) + register(KillNonCreateRunHaveNoEffect, rfc2119.Must, killRef) + register(KillNonCreateRunGenError, rfc2119.Must, killRef) + register(KillSignalImplement, rfc2119.Must, killRef) + register(DeleteWithoutIDGenError, rfc2119.Must, deleteRef) + register(DeleteNonStopHaveNoEffect, rfc2119.Must, deleteRef) + register(DeleteNonStopGenError, rfc2119.Must, deleteRef) + register(DeleteResImplement, rfc2119.Must, deleteRef) + register(DeleteOnlyCreatedRes, rfc2119.Must, deleteRef) +} diff --git a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go index bbdb29c6..1030099d 100644 --- a/vendor/github.com/opencontainers/runtime-tools/validate/validate.go +++ b/vendor/github.com/opencontainers/runtime-tools/validate/validate.go @@ -20,33 +20,41 @@ import ( "github.com/blang/semver" "github.com/hashicorp/go-multierror" rspec "github.com/opencontainers/runtime-spec/specs-go" + osFilepath "github.com/opencontainers/runtime-tools/filepath" "github.com/sirupsen/logrus" "github.com/syndtr/gocapability/capability" "github.com/opencontainers/runtime-tools/specerror" + "github.com/xeipuuv/gojsonschema" ) const specConfig = "config.json" var ( - defaultRlimits = []string{ + // http://pubs.opengroup.org/onlinepubs/9699919799/functions/getrlimit.html + posixRlimits = []string{ "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA", "RLIMIT_FSIZE", - "RLIMIT_LOCKS", + "RLIMIT_NOFILE", + "RLIMIT_STACK", + } + + // https://git.kernel.org/pub/scm/docs/man-pages/man-pages.git/tree/man2/getrlimit.2?h=man-pages-4.13 + linuxRlimits = append(posixRlimits, []string{ "RLIMIT_MEMLOCK", "RLIMIT_MSGQUEUE", "RLIMIT_NICE", - "RLIMIT_NOFILE", "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING", - "RLIMIT_STACK", - } + }...) 
+ + configSchemaTemplate = "https://raw.githubusercontent.com/opencontainers/runtime-spec/v%s/schema/config-schema.json" ) // Validator represents a validator for runtime bundle @@ -58,23 +66,20 @@ type Validator struct { } // NewValidator creates a Validator -func NewValidator(spec *rspec.Spec, bundlePath string, hostSpecific bool, platform string) Validator { +func NewValidator(spec *rspec.Spec, bundlePath string, hostSpecific bool, platform string) (Validator, error) { if hostSpecific && platform != runtime.GOOS { - platform = runtime.GOOS + return Validator{}, fmt.Errorf("When hostSpecific is set, platform must be same as the host platform") } return Validator{ spec: spec, bundlePath: bundlePath, HostSpecific: hostSpecific, platform: platform, - } + }, nil } // NewValidatorFromPath creates a Validator with specified bundle path func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string) (Validator, error) { - if hostSpecific && platform != runtime.GOOS { - platform = runtime.GOOS - } if bundlePath == "" { return Validator{}, fmt.Errorf("bundle path shouldn't be empty") } @@ -86,7 +91,7 @@ func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string) configPath := filepath.Join(bundlePath, specConfig) content, err := ioutil.ReadFile(configPath) if err != nil { - return Validator{}, specerror.NewError(specerror.ConfigFileExistence, err, rspec.Version) + return Validator{}, specerror.NewError(specerror.ConfigInRootBundleDir, err, rspec.Version) } if !utf8.Valid(content) { return Validator{}, fmt.Errorf("%q is not encoded in UTF-8", configPath) @@ -96,21 +101,68 @@ func NewValidatorFromPath(bundlePath string, hostSpecific bool, platform string) return Validator{}, err } - return NewValidator(&spec, bundlePath, hostSpecific, platform), nil + return NewValidator(&spec, bundlePath, hostSpecific, platform) } // CheckAll checks all parts of runtime bundle -func (v *Validator) CheckAll() (errs error) { +func (v *Validator) CheckAll() error { + var errs *multierror.Error + errs = multierror.Append(errs, v.CheckJSONSchema()) errs = multierror.Append(errs, v.CheckPlatform()) errs = multierror.Append(errs, v.CheckRoot()) errs = multierror.Append(errs, v.CheckMandatoryFields()) errs = multierror.Append(errs, v.CheckSemVer()) errs = multierror.Append(errs, v.CheckMounts()) errs = multierror.Append(errs, v.CheckProcess()) - errs = multierror.Append(errs, v.CheckHooks()) errs = multierror.Append(errs, v.CheckLinux()) + if v.platform == "linux" || v.platform == "solaris" { + errs = multierror.Append(errs, v.CheckHooks()) + } - return + return errs.ErrorOrNil() +} + +// JSONSchemaURL returns the URL for the JSON Schema specifying the +// configuration format. It consumes configSchemaTemplate, but we +// provide it as a function to isolate consumers from inconsistent +// naming as runtime-spec evolves. 
+func JSONSchemaURL(version string) (url string, err error) { + ver, err := semver.Parse(version) + if err != nil { + return "", specerror.NewError(specerror.SpecVersionInSemVer, err, rspec.Version) + } + configRenamedToConfigSchemaVersion, err := semver.Parse("1.0.0-rc2") // config.json became config-schema.json in 1.0.0-rc2 + if ver.Compare(configRenamedToConfigSchemaVersion) == -1 { + return "", fmt.Errorf("unsupported configuration version (older than %s)", configRenamedToConfigSchemaVersion) + } + return fmt.Sprintf(configSchemaTemplate, version), nil +} + +// CheckJSONSchema validates the configuration against the +// runtime-spec JSON Schema, using the version of the schema that +// matches the configuration's declared version. +func (v *Validator) CheckJSONSchema() (errs error) { + url, err := JSONSchemaURL(v.spec.Version) + if err != nil { + errs = multierror.Append(errs, err) + return errs + } + + schemaLoader := gojsonschema.NewReferenceLoader(url) + documentLoader := gojsonschema.NewGoLoader(v.spec) + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + if err != nil { + errs = multierror.Append(errs, err) + return errs + } + + if !result.Valid() { + for _, resultError := range result.Errors() { + errs = multierror.Append(errs, errors.New(resultError.String())) + } + } + + return errs } // CheckRoot checks status of v.spec.Root @@ -120,13 +172,30 @@ func (v *Validator) CheckRoot() (errs error) { if v.platform == "windows" && v.spec.Windows != nil && v.spec.Windows.HyperV != nil { if v.spec.Root != nil { errs = multierror.Append(errs, - specerror.NewError(specerror.RootOnHyperV, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version)) + specerror.NewError(specerror.RootOnHyperVNotSet, fmt.Errorf("for Hyper-V containers, Root must not be set"), rspec.Version)) return } return } else if v.spec.Root == nil { errs = multierror.Append(errs, - specerror.NewError(specerror.RootOnNonHyperV, fmt.Errorf("for non-Hyper-V containers, Root must be set"), rspec.Version)) + specerror.NewError(specerror.RootOnNonHyperVRequired, fmt.Errorf("for non-Hyper-V containers, Root must be set"), rspec.Version)) + return + } + + if v.platform == "windows" { + matched, err := regexp.MatchString(`\\\\[?]\\Volume[{][a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}[}]\\`, v.spec.Root.Path) + if err != nil { + errs = multierror.Append(errs, err) + } else if !matched { + errs = multierror.Append(errs, + specerror.NewError(specerror.RootPathOnWindowsGUID, fmt.Errorf("root.path is %q, but it MUST be a volume GUID path when target platform is windows", v.spec.Root.Path), rspec.Version)) + } + + if v.spec.Root.Readonly { + errs = multierror.Append(errs, + specerror.NewError(specerror.RootReadonlyOnWindowsFalse, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version)) + } + return } @@ -138,7 +207,7 @@ func (v *Validator) CheckRoot() (errs error) { if filepath.Base(v.spec.Root.Path) != "rootfs" { errs = multierror.Append(errs, - specerror.NewError(specerror.PathName, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version)) + specerror.NewError(specerror.RootPathOnPosixConvention, fmt.Errorf("path name should be the conventional 'rootfs'"), rspec.Version)) } var rootfsPath string @@ -158,10 +227,10 @@ func (v *Validator) CheckRoot() (errs error) { if fi, err := os.Stat(rootfsPath); err != nil { errs = multierror.Append(errs, - specerror.NewError(specerror.PathExistence, 
fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version)) + specerror.NewError(specerror.RootPathExist, fmt.Errorf("cannot find the root path %q", rootfsPath), rspec.Version)) } else if !fi.IsDir() { errs = multierror.Append(errs, - specerror.NewError(specerror.PathExistence, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version)) + specerror.NewError(specerror.RootPathExist, fmt.Errorf("root.path %q is not a directory", rootfsPath), rspec.Version)) } rootParent := filepath.Dir(absRootPath) @@ -170,13 +239,6 @@ func (v *Validator) CheckRoot() (errs error) { specerror.NewError(specerror.ArtifactsInSingleDir, fmt.Errorf("root.path is %q, but it MUST be a child of %q", v.spec.Root.Path, absBundlePath), rspec.Version)) } - if v.platform == "windows" { - if v.spec.Root.Readonly { - errs = multierror.Append(errs, - specerror.NewError(specerror.ReadonlyOnWindows, fmt.Errorf("root.readonly field MUST be omitted or false when target platform is windows"), rspec.Version)) - } - } - return } @@ -188,7 +250,7 @@ func (v *Validator) CheckSemVer() (errs error) { _, err := semver.Parse(version) if err != nil { errs = multierror.Append(errs, - specerror.NewError(specerror.SpecVersion, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version)) + specerror.NewError(specerror.SpecVersionInSemVer, fmt.Errorf("%q is not valid SemVer: %s", version, err.Error()), rspec.Version)) } if version != rspec.Version { errs = multierror.Append(errs, fmt.Errorf("validate currently only handles version %s, but the supplied configuration targets %s", rspec.Version, version)) @@ -201,19 +263,29 @@ func (v *Validator) CheckSemVer() (errs error) { func (v *Validator) CheckHooks() (errs error) { logrus.Debugf("check hooks") + if v.platform != "linux" && v.platform != "solaris" { + errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support hooks", v.platform)) + return + } + if v.spec.Hooks != nil { - errs = multierror.Append(errs, checkEventHooks("pre-start", v.spec.Hooks.Prestart, v.HostSpecific)) - errs = multierror.Append(errs, checkEventHooks("post-start", v.spec.Hooks.Poststart, v.HostSpecific)) - errs = multierror.Append(errs, checkEventHooks("post-stop", v.spec.Hooks.Poststop, v.HostSpecific)) + errs = multierror.Append(errs, v.checkEventHooks("prestart", v.spec.Hooks.Prestart, v.HostSpecific)) + errs = multierror.Append(errs, v.checkEventHooks("poststart", v.spec.Hooks.Poststart, v.HostSpecific)) + errs = multierror.Append(errs, v.checkEventHooks("poststop", v.spec.Hooks.Poststop, v.HostSpecific)) } return } -func checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) { - for _, hook := range hooks { - if !filepath.IsAbs(hook.Path) { - errs = multierror.Append(errs, fmt.Errorf("the %s hook %v: is not absolute path", hookType, hook.Path)) +func (v *Validator) checkEventHooks(hookType string, hooks []rspec.Hook, hostSpecific bool) (errs error) { + for i, hook := range hooks { + if !osFilepath.IsAbs(v.platform, hook.Path) { + errs = multierror.Append(errs, + specerror.NewError( + specerror.PosixHooksPathAbs, + fmt.Errorf("hooks.%s[%d].path %v: is not absolute path", + hookType, i, hook.Path), + rspec.Version)) } if hostSpecific { @@ -245,8 +317,12 @@ func (v *Validator) CheckProcess() (errs error) { } process := v.spec.Process - if !filepath.IsAbs(process.Cwd) { - errs = multierror.Append(errs, fmt.Errorf("cwd %q is not an absolute path", process.Cwd)) + if !osFilepath.IsAbs(v.platform, 
process.Cwd) { + errs = multierror.Append(errs, + specerror.NewError( + specerror.ProcCwdAbs, + fmt.Errorf("cwd %q is not an absolute path", process.Cwd), + rspec.Version)) } for _, env := range process.Env { @@ -256,9 +332,13 @@ func (v *Validator) CheckProcess() (errs error) { } if len(process.Args) == 0 { - errs = multierror.Append(errs, fmt.Errorf("args must not be empty")) + errs = multierror.Append(errs, + specerror.NewError( + specerror.ProcArgsOneEntryRequired, + fmt.Errorf("args must not be empty"), + rspec.Version)) } else { - if filepath.IsAbs(process.Args[0]) { + if filepath.IsAbs(process.Args[0]) && v.spec.Root != nil { var rootfsPath string if filepath.IsAbs(v.spec.Root.Path) { rootfsPath = v.spec.Root.Path @@ -280,12 +360,15 @@ func (v *Validator) CheckProcess() (errs error) { } } - if v.spec.Process.Capabilities != nil { - errs = multierror.Append(errs, v.CheckCapabilities()) + if v.platform == "linux" || v.platform == "solaris" { + errs = multierror.Append(errs, v.CheckRlimits()) } - errs = multierror.Append(errs, v.CheckRlimits()) if v.platform == "linux" { + if v.spec.Process.Capabilities != nil { + errs = multierror.Append(errs, v.CheckCapabilities()) + } + if len(process.ApparmorProfile) > 0 { profilePath := filepath.Join(v.bundlePath, v.spec.Root.Path, "/etc/apparmor.d", process.ApparmorProfile) _, err := os.Stat(profilePath) @@ -300,60 +383,61 @@ func (v *Validator) CheckProcess() (errs error) { // CheckCapabilities checks v.spec.Process.Capabilities func (v *Validator) CheckCapabilities() (errs error) { + if v.platform != "linux" { + errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support process.capabilities", v.platform)) + return + } + process := v.spec.Process - if v.platform == "linux" { - var effective, permitted, inheritable, ambient bool - caps := make(map[string][]string) + var effective, permitted, inheritable, ambient bool + caps := make(map[string][]string) - for _, cap := range process.Capabilities.Bounding { - caps[cap] = append(caps[cap], "bounding") - } - for _, cap := range process.Capabilities.Effective { - caps[cap] = append(caps[cap], "effective") - } - for _, cap := range process.Capabilities.Inheritable { - caps[cap] = append(caps[cap], "inheritable") - } - for _, cap := range process.Capabilities.Permitted { - caps[cap] = append(caps[cap], "permitted") - } - for _, cap := range process.Capabilities.Ambient { - caps[cap] = append(caps[cap], "ambient") + for _, cap := range process.Capabilities.Bounding { + caps[cap] = append(caps[cap], "bounding") + } + for _, cap := range process.Capabilities.Effective { + caps[cap] = append(caps[cap], "effective") + } + for _, cap := range process.Capabilities.Inheritable { + caps[cap] = append(caps[cap], "inheritable") + } + for _, cap := range process.Capabilities.Permitted { + caps[cap] = append(caps[cap], "permitted") + } + for _, cap := range process.Capabilities.Ambient { + caps[cap] = append(caps[cap], "ambient") + } + + for capability, owns := range caps { + if err := CapValid(capability, v.HostSpecific); err != nil { + errs = multierror.Append(errs, fmt.Errorf("capability %q is not valid, man capabilities(7)", capability)) } - for capability, owns := range caps { - if err := CapValid(capability, v.HostSpecific); err != nil { - errs = multierror.Append(errs, fmt.Errorf("capability %q is not valid, man capabilities(7)", capability)) + effective, permitted, ambient, inheritable = false, false, false, false + for _, set := range owns { + if set == 
"effective" { + effective = true + continue } - - effective, permitted, ambient, inheritable = false, false, false, false - for _, set := range owns { - if set == "effective" { - effective = true - continue - } - if set == "inheritable" { - inheritable = true - continue - } - if set == "permitted" { - permitted = true - continue - } - if set == "ambient" { - ambient = true - continue - } + if set == "inheritable" { + inheritable = true + continue } - if effective && !permitted { - errs = multierror.Append(errs, fmt.Errorf("effective capability %q is not allowed, as it's not permitted", capability)) + if set == "permitted" { + permitted = true + continue } - if ambient && !(effective && inheritable) { - errs = multierror.Append(errs, fmt.Errorf("ambient capability %q is not allowed, as it's not permitted and inheribate", capability)) + if set == "ambient" { + ambient = true + continue } } - } else { - logrus.Warnf("process.capabilities validation not yet implemented for OS %q", v.platform) + if effective && !permitted { + errs = multierror.Append(errs, fmt.Errorf("effective capability %q is not allowed, as it's not permitted", capability)) + } + if ambient && !(permitted && inheritable) { + errs = multierror.Append(errs, fmt.Errorf("ambient capability %q is not allowed, as it's not permitted and inheribate", capability)) + } } return @@ -361,11 +445,21 @@ func (v *Validator) CheckCapabilities() (errs error) { // CheckRlimits checks v.spec.Process.Rlimits func (v *Validator) CheckRlimits() (errs error) { + if v.platform != "linux" && v.platform != "solaris" { + errs = multierror.Append(errs, fmt.Errorf("For %q platform, the configuration structure does not support process.rlimits", v.platform)) + return + } + process := v.spec.Process for index, rlimit := range process.Rlimits { for i := index + 1; i < len(process.Rlimits); i++ { if process.Rlimits[index].Type == process.Rlimits[i].Type { - errs = multierror.Append(errs, fmt.Errorf("rlimit can not contain the same type %q", process.Rlimits[index].Type)) + errs = multierror.Append(errs, + specerror.NewError( + specerror.PosixProcRlimitsErrorOnDup, + fmt.Errorf("rlimit can not contain the same type %q", + process.Rlimits[index].Type), + rspec.Version)) } } errs = multierror.Append(errs, v.rlimitValid(rlimit)) @@ -429,31 +523,33 @@ func (v *Validator) CheckMounts() (errs error) { if supportedTypes != nil && !supportedTypes[mountA.Type] { errs = multierror.Append(errs, fmt.Errorf("unsupported mount type %q", mountA.Type)) } - if v.platform == "windows" { - if err := pathValid(v.platform, mountA.Destination); err != nil { - errs = multierror.Append(errs, err) - } - if err := pathValid(v.platform, mountA.Source); err != nil { - errs = multierror.Append(errs, err) - } - } else { - if err := pathValid(v.platform, mountA.Destination); err != nil { - errs = multierror.Append(errs, err) - } + if !osFilepath.IsAbs(v.platform, mountA.Destination) { + errs = multierror.Append(errs, + specerror.NewError( + specerror.MountsDestAbs, + fmt.Errorf("mounts[%d].destination %q is not absolute", + i, + mountA.Destination), + rspec.Version)) } for j, mountB := range v.spec.Mounts { if i == j { continue } // whether B.Desination is nested within A.Destination - nested, err := nestedValid(v.platform, mountA.Destination, mountB.Destination) + nested, err := osFilepath.IsAncestor(v.platform, mountA.Destination, mountB.Destination, ".") if err != nil { errs = multierror.Append(errs, err) continue } if nested { if v.platform == "windows" && i < j { - errs = 
multierror.Append(errs, fmt.Errorf("on Windows, %v nested within %v is forbidden", mountB.Destination, mountA.Destination)) + errs = multierror.Append(errs, + specerror.NewError( + specerror.MountsDestOnWindowsNotNested, + fmt.Errorf("on Windows, %v nested within %v is forbidden", + mountB.Destination, mountA.Destination), + rspec.Version)) } if i > j { logrus.Warnf("%v will be covered by %v", mountB.Destination, mountA.Destination) @@ -476,7 +572,11 @@ func (v *Validator) CheckPlatform() (errs error) { if v.platform == "windows" { if v.spec.Windows == nil { - errs = multierror.Append(errs, errors.New("'windows' MUST be set when platform is `windows`")) + errs = multierror.Append(errs, + specerror.NewError( + specerror.PlatformSpecConfOnWindowsSet, + fmt.Errorf("'windows' MUST be set when platform is `windows`"), + rspec.Version)) } } @@ -506,14 +606,14 @@ func (v *Validator) CheckLinux() (errs error) { for index := 0; index < len(v.spec.Linux.Namespaces); index++ { ns := v.spec.Linux.Namespaces[index] - if !namespaceValid(ns) { - errs = multierror.Append(errs, fmt.Errorf("namespace %v is invalid", ns)) + if ns.Path != "" && !osFilepath.IsAbs(v.platform, ns.Path) { + errs = multierror.Append(errs, specerror.NewError(specerror.NSPathAbs, fmt.Errorf("namespace.path %q is not an absolute path", ns.Path), rspec.Version)) } tmpItem := nsTypeList[ns.Type] tmpItem.num = tmpItem.num + 1 if tmpItem.num > 1 { - errs = multierror.Append(errs, fmt.Errorf("duplicated namespace %q", ns.Type)) + errs = multierror.Append(errs, specerror.NewError(specerror.NSErrorOnDup, fmt.Errorf("duplicated namespace %q", ns.Type), rspec.Version)) } if len(ns.Path) == 0 { @@ -524,10 +624,6 @@ func (v *Validator) CheckLinux() (errs error) { if (len(v.spec.Linux.UIDMappings) > 0 || len(v.spec.Linux.GIDMappings) > 0) && !nsTypeList[rspec.UserNamespace].newExist { errs = multierror.Append(errs, errors.New("the UID/GID mappings requires a new User namespace to be specified as well")) - } else if len(v.spec.Linux.UIDMappings) > 5 { - errs = multierror.Append(errs, errors.New("only 5 UID mappings are allowed (linux kernel restriction)")) - } else if len(v.spec.Linux.GIDMappings) > 5 { - errs = multierror.Append(errs, errors.New("only 5 GID mappings are allowed (linux kernel restriction)")) } for k := range v.spec.Linux.Sysctl { @@ -572,7 +668,8 @@ func (v *Validator) CheckLinux() (errs error) { } else { fStat, ok := fi.Sys().(*syscall.Stat_t) if !ok { - errs = multierror.Append(errs, fmt.Errorf("cannot determine state for device %s", device.Path)) + errs = multierror.Append(errs, specerror.NewError(specerror.DevicesAvailable, + fmt.Errorf("cannot determine state for device %s", device.Path), rspec.Version)) continue } var devType string @@ -587,7 +684,8 @@ func (v *Validator) CheckLinux() (errs error) { devType = "unmatched" } if devType != device.Type || (devType == "c" && device.Type == "u") { - errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path)) + errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch, + fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version)) continue } if devType != "p" { @@ -595,7 +693,8 @@ func (v *Validator) CheckLinux() (errs error) { major := (dev >> 8) & 0xfff minor := (dev & 0xff) | ((dev >> 12) & 0xfff00) if int64(major) != device.Major || int64(minor) != device.Minor { - errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path)) + errs = 
multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch, + fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version)) continue } } @@ -603,19 +702,22 @@ func (v *Validator) CheckLinux() (errs error) { expectedPerm := *device.FileMode & os.ModePerm actualPerm := fi.Mode() & os.ModePerm if expectedPerm != actualPerm { - errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path)) + errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch, + fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version)) continue } } if device.UID != nil { if *device.UID != fStat.Uid { - errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path)) + errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch, + fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version)) continue } } if device.GID != nil { if *device.GID != fStat.Gid { - errs = multierror.Append(errs, fmt.Errorf("unmatched %s already exists in filesystem", device.Path)) + errs = multierror.Append(errs, specerror.NewError(specerror.DevicesFileNotMatch, + fmt.Errorf("unmatched %s already exists in filesystem", device.Path), rspec.Version)) continue } } @@ -641,33 +743,23 @@ func (v *Validator) CheckLinux() (errs error) { errs = multierror.Append(errs, v.CheckLinuxResources()) } - if v.spec.Linux.Seccomp != nil { - errs = multierror.Append(errs, v.CheckSeccomp()) - } - - switch v.spec.Linux.RootfsPropagation { - case "": - case "private": - case "rprivate": - case "slave": - case "rslave": - case "shared": - case "rshared": - case "unbindable": - case "runbindable": - default: - errs = multierror.Append(errs, errors.New("rootfsPropagation must be empty or one of \"private|rprivate|slave|rslave|shared|rshared|unbindable|runbindable\"")) - } - for _, maskedPath := range v.spec.Linux.MaskedPaths { if !strings.HasPrefix(maskedPath, "/") { - errs = multierror.Append(errs, fmt.Errorf("maskedPath %v is not an absolute path", maskedPath)) + errs = multierror.Append(errs, + specerror.NewError( + specerror.MaskedPathsAbs, + fmt.Errorf("maskedPath %v is not an absolute path", maskedPath), + rspec.Version)) } } for _, readonlyPath := range v.spec.Linux.ReadonlyPaths { if !strings.HasPrefix(readonlyPath, "/") { - errs = multierror.Append(errs, fmt.Errorf("readonlyPath %v is not an absolute path", readonlyPath)) + errs = multierror.Append(errs, + specerror.NewError( + specerror.ReadonlyPathsAbs, + fmt.Errorf("readonlyPath %v is not an absolute path", readonlyPath), + rspec.Version)) } } @@ -709,7 +801,7 @@ func (v *Validator) CheckLinuxResources() (errs error) { } for index := 0; index < len(r.Devices); index++ { switch r.Devices[index].Type { - case "a", "b", "c": + case "a", "b", "c", "": default: errs = multierror.Append(errs, fmt.Errorf("type of devices %s is invalid", r.Devices[index].Type)) } @@ -728,47 +820,6 @@ func (v *Validator) CheckLinuxResources() (errs error) { return } -// CheckSeccomp checkc v.spec.Linux.Seccomp -func (v *Validator) CheckSeccomp() (errs error) { - logrus.Debugf("check linux seccomp") - - s := v.spec.Linux.Seccomp - if !seccompActionValid(s.DefaultAction) { - errs = multierror.Append(errs, fmt.Errorf("seccomp defaultAction %q is invalid", s.DefaultAction)) - } - for index := 0; index < len(s.Syscalls); index++ { - if !syscallValid(s.Syscalls[index]) { - errs = multierror.Append(errs, fmt.Errorf("syscall %v is 
invalid", s.Syscalls[index])) - } - } - for index := 0; index < len(s.Architectures); index++ { - switch s.Architectures[index] { - case rspec.ArchX86: - case rspec.ArchX86_64: - case rspec.ArchX32: - case rspec.ArchARM: - case rspec.ArchAARCH64: - case rspec.ArchMIPS: - case rspec.ArchMIPS64: - case rspec.ArchMIPS64N32: - case rspec.ArchMIPSEL: - case rspec.ArchMIPSEL64: - case rspec.ArchMIPSEL64N32: - case rspec.ArchPPC: - case rspec.ArchPPC64: - case rspec.ArchPPC64LE: - case rspec.ArchS390: - case rspec.ArchS390X: - case rspec.ArchPARISC: - case rspec.ArchPARISC64: - default: - errs = multierror.Append(errs, fmt.Errorf("seccomp architecture %q is invalid", s.Architectures[index])) - } - } - - return -} - // CapValid checks whether a capability is valid func CapValid(c string, hostSpecific bool) error { isValid := false @@ -825,12 +876,19 @@ func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) { } if v.platform == "linux" { - for _, val := range defaultRlimits { + for _, val := range linuxRlimits { if val == rlimit.Type { return } } - errs = multierror.Append(errs, fmt.Errorf("rlimit type %q is invalid", rlimit.Type)) + errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version)) + } else if v.platform == "solaris" { + for _, val := range posixRlimits { + if val == rlimit.Type { + return + } + } + errs = multierror.Append(errs, specerror.NewError(specerror.PosixProcRlimitsTypeValueError, fmt.Errorf("rlimit type %q may not be valid", rlimit.Type), v.spec.Version)) } else { logrus.Warnf("process.rlimits validation not yet implemented for platform %q", v.platform) } @@ -838,85 +896,6 @@ func (v *Validator) rlimitValid(rlimit rspec.POSIXRlimit) (errs error) { return } -func namespaceValid(ns rspec.LinuxNamespace) bool { - switch ns.Type { - case rspec.PIDNamespace: - case rspec.NetworkNamespace: - case rspec.MountNamespace: - case rspec.IPCNamespace: - case rspec.UTSNamespace: - case rspec.UserNamespace: - case rspec.CgroupNamespace: - default: - return false - } - - if ns.Path != "" && !filepath.IsAbs(ns.Path) { - return false - } - - return true -} - -func pathValid(os, path string) error { - if os == "windows" { - matched, err := regexp.MatchString("^[a-zA-Z]:(\\\\[^\\\\/<>|:*?\"]+)+$", path) - if err != nil { - return err - } - if !matched { - return fmt.Errorf("invalid windows path %v", path) - } - return nil - } - if !filepath.IsAbs(path) { - return fmt.Errorf("%v is not an absolute path", path) - } - return nil -} - -// Check whether pathB is nested whithin pathA -func nestedValid(os, pathA, pathB string) (bool, error) { - if pathA == pathB { - return false, nil - } - if pathA == "/" && pathB != "" { - return true, nil - } - - var sep string - if os == "windows" { - sep = "\\" - } else { - sep = "/" - } - - splitedPathA := strings.Split(filepath.Clean(pathA), sep) - splitedPathB := strings.Split(filepath.Clean(pathB), sep) - lenA := len(splitedPathA) - lenB := len(splitedPathB) - - if lenA > lenB { - if (lenA - lenB) == 1 { - // if pathA is longer but not end with separator - if splitedPathA[lenA-1] != "" { - return false, nil - } - splitedPathA = splitedPathA[:lenA-1] - } else { - return false, nil - } - } - - for i, partA := range splitedPathA { - if partA != splitedPathB[i] { - return false, nil - } - } - - return true, nil -} - func deviceValid(d rspec.LinuxDevice) bool { switch d.Type { case "b", "c", "u": @@ -924,7 +903,7 @@ func deviceValid(d 
rspec.LinuxDevice) bool { return false } case "p": - if d.Major > 0 || d.Minor > 0 { + if d.Major != 0 || d.Minor != 0 { return false } default: @@ -933,41 +912,6 @@ func deviceValid(d rspec.LinuxDevice) bool { return true } -func seccompActionValid(secc rspec.LinuxSeccompAction) bool { - switch secc { - case "": - case rspec.ActKill: - case rspec.ActTrap: - case rspec.ActErrno: - case rspec.ActTrace: - case rspec.ActAllow: - default: - return false - } - return true -} - -func syscallValid(s rspec.LinuxSyscall) bool { - if !seccompActionValid(s.Action) { - return false - } - for index := 0; index < len(s.Args); index++ { - arg := s.Args[index] - switch arg.Op { - case rspec.OpNotEqual: - case rspec.OpLessThan: - case rspec.OpLessEqual: - case rspec.OpEqualTo: - case rspec.OpGreaterEqual: - case rspec.OpGreaterThan: - case rspec.OpMaskedEqual: - default: - return false - } - } - return true -} - func isStruct(t reflect.Type) bool { return t.Kind() == reflect.Struct } diff --git a/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonpointer/README.md b/vendor/github.com/xeipuuv/gojsonpointer/README.md new file mode 100644 index 00000000..dbe4d508 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/README.md @@ -0,0 +1,8 @@ +# gojsonpointer +An implementation of JSON Pointer - Go language + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +### Note +The 4.Evaluation part of the previous reference, starting with 'If the currently referenced value is a JSON array, the reference token MUST contain either...' is not implemented. 
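The gojsonpointer package vendored below is what gojsonschema (used by the new CheckJSONSchema pass in validate.go) relies on to resolve JSON Pointer paths. As a minimal, standalone sketch of the API defined in pointer.go below (illustrative only, not part of this patch; it assumes only the NewJsonPointer, Get, and Set functions shown there):

```
package main

import (
	"encoding/json"
	"fmt"

	"github.com/xeipuuv/gojsonpointer"
)

func main() {
	var doc map[string]interface{}
	raw := `{"process": {"args": ["sh"]}, "mounts": [{"destination": "/proc"}]}`
	if err := json.Unmarshal([]byte(raw), &doc); err != nil {
		panic(err)
	}

	// Maps are walked by key and slices by decimal index; per the encoding
	// handled in pointer.go, "~1" in a token decodes to "/" and "~0" to "~".
	p, err := gojsonpointer.NewJsonPointer("/mounts/0/destination")
	if err != nil {
		panic(err)
	}

	node, kind, err := p.Get(doc)
	fmt.Println(node, kind, err) // /proc string <nil>

	// Set rewrites the addressed value in place and returns the document.
	if _, err := p.Set(doc, "/sys"); err != nil {
		panic(err)
	}
	fmt.Println(doc["mounts"].([]interface{})[0]) // map[destination:/sys]
}
```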
diff --git a/vendor/github.com/xeipuuv/gojsonpointer/pointer.go b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go new file mode 100644 index 00000000..06f1918e --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonpointer/pointer.go @@ -0,0 +1,190 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonpointer +// repository-desc An implementation of JSON Pointer - Go language +// +// description Main and unique file. +// +// created 25-02-2013 + +package gojsonpointer + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +const ( + const_empty_pointer = `` + const_pointer_separator = `/` + + const_invalid_start = `JSON pointer must be empty or start with a "` + const_pointer_separator + `"` +) + +type implStruct struct { + mode string // "SET" or "GET" + + inDocument interface{} + + setInValue interface{} + + getOutNode interface{} + getOutKind reflect.Kind + outError error +} + +type JsonPointer struct { + referenceTokens []string +} + +// NewJsonPointer parses the given string JSON pointer and returns an object +func NewJsonPointer(jsonPointerString string) (p JsonPointer, err error) { + + // Pointer to the root of the document + if len(jsonPointerString) == 0 { + // Keep referenceTokens nil + return + } + if jsonPointerString[0] != '/' { + return p, errors.New(const_invalid_start) + } + + p.referenceTokens = strings.Split(jsonPointerString[1:], const_pointer_separator) + return +} + +// Uses the pointer to retrieve a value from a JSON document +func (p *JsonPointer) Get(document interface{}) (interface{}, reflect.Kind, error) { + + is := &implStruct{mode: "GET", inDocument: document} + p.implementation(is) + return is.getOutNode, is.getOutKind, is.outError + +} + +// Uses the pointer to update a value from a JSON document +func (p *JsonPointer) Set(document interface{}, value interface{}) (interface{}, error) { + + is := &implStruct{mode: "SET", inDocument: document, setInValue: value} + p.implementation(is) + return document, is.outError + +} + +// Both Get and Set functions use the same implementation to avoid code duplication +func (p *JsonPointer) implementation(i *implStruct) { + + kind := reflect.Invalid + + // Full document when empty + if len(p.referenceTokens) == 0 { + i.getOutNode = i.inDocument + i.outError = nil + i.getOutKind = kind + i.outError = nil + return + } + + node := i.inDocument + + for ti, token := range p.referenceTokens { + + isLastToken := ti == len(p.referenceTokens)-1 + + switch v := node.(type) { + + case map[string]interface{}: + decodedToken := decodeReferenceToken(token) + if _, ok := v[decodedToken]; ok { + node = v[decodedToken] + if isLastToken && i.mode == "SET" { + v[decodedToken] = i.setInValue + } + } else { + i.outError = fmt.Errorf("Object has no key '%s'", decodedToken) + i.getOutKind = reflect.Map + i.getOutNode = nil + return + } + + 
case []interface{}: + tokenIndex, err := strconv.Atoi(token) + if err != nil { + i.outError = fmt.Errorf("Invalid array index '%s'", token) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + if tokenIndex < 0 || tokenIndex >= len(v) { + i.outError = fmt.Errorf("Out of bound array[0,%d] index '%d'", len(v), tokenIndex) + i.getOutKind = reflect.Slice + i.getOutNode = nil + return + } + + node = v[tokenIndex] + if isLastToken && i.mode == "SET" { + v[tokenIndex] = i.setInValue + } + + default: + i.outError = fmt.Errorf("Invalid token reference '%s'", token) + i.getOutKind = reflect.ValueOf(node).Kind() + i.getOutNode = nil + return + } + + } + + i.getOutNode = node + i.getOutKind = reflect.ValueOf(node).Kind() + i.outError = nil +} + +// Pointer to string representation function +func (p *JsonPointer) String() string { + + if len(p.referenceTokens) == 0 { + return const_empty_pointer + } + + pointerString := const_pointer_separator + strings.Join(p.referenceTokens, const_pointer_separator) + + return pointerString +} + +// Specific JSON pointer encoding here +// ~0 => ~ +// ~1 => / +// ... and vice versa + +func decodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~1`, `/`, -1) + step2 := strings.Replace(step1, `~0`, `~`, -1) + return step2 +} + +func encodeReferenceToken(token string) string { + step1 := strings.Replace(token, `~`, `~0`, -1) + step2 := strings.Replace(step1, `/`, `~1`, -1) + return step2 +} diff --git a/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonreference/README.md b/vendor/github.com/xeipuuv/gojsonreference/README.md new file mode 100644 index 00000000..9ab6e1eb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/README.md @@ -0,0 +1,10 @@ +# gojsonreference +An implementation of JSON Reference - Go language + +## Dependencies +https://github.com/xeipuuv/gojsonpointer + +## References +http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07 + +http://tools.ietf.org/html/draft-pbryan-zyp-json-ref-03 diff --git a/vendor/github.com/xeipuuv/gojsonreference/reference.go b/vendor/github.com/xeipuuv/gojsonreference/reference.go new file mode 100644 index 00000000..d4d2eca0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonreference/reference.go @@ -0,0 +1,141 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonreference +// repository-desc An implementation of JSON Reference - Go language +// +// description Main and unique file. +// +// created 26-02-2013 + +package gojsonreference + +import ( + "errors" + "github.com/xeipuuv/gojsonpointer" + "net/url" + "path/filepath" + "runtime" + "strings" +) + +const ( + const_fragment_char = `#` +) + +func NewJsonReference(jsonReferenceString string) (JsonReference, error) { + + var r JsonReference + err := r.parse(jsonReferenceString) + return r, err + +} + +type JsonReference struct { + referenceUrl *url.URL + referencePointer gojsonpointer.JsonPointer + + HasFullUrl bool + HasUrlPathOnly bool + HasFragmentOnly bool + HasFileScheme bool + HasFullFilePath bool +} + +func (r *JsonReference) GetUrl() *url.URL { + return r.referenceUrl +} + +func (r *JsonReference) GetPointer() *gojsonpointer.JsonPointer { + return &r.referencePointer +} + +func (r *JsonReference) String() string { + + if r.referenceUrl != nil { + return r.referenceUrl.String() + } + + if r.HasFragmentOnly { + return const_fragment_char + r.referencePointer.String() + } + + return r.referencePointer.String() +} + +func (r *JsonReference) IsCanonical() bool { + return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullUrl) +} + +// "Constructor", parses the given string JSON reference +func (r *JsonReference) parse(jsonReferenceString string) (err error) { + + r.referenceUrl, err = url.Parse(jsonReferenceString) + if err != nil { + return + } + refUrl := r.referenceUrl + + if refUrl.Scheme != "" && refUrl.Host != "" { + r.HasFullUrl = true + } else { + if refUrl.Path != "" { + r.HasUrlPathOnly = true + } else if refUrl.RawQuery == "" && refUrl.Fragment != "" { + r.HasFragmentOnly = true + } + } + + r.HasFileScheme = refUrl.Scheme == "file" + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, and if it + // doesn't then its first component will be treated as the host by the + // Go runtime + if refUrl.Host == "" && strings.HasPrefix(refUrl.Path, "/") { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path[1:]) + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Host + refUrl.Path) + } + } else { + r.HasFullFilePath = filepath.IsAbs(refUrl.Path) + } + + // invalid json-pointer error means url has no json-pointer fragment. 
simply ignore error + r.referencePointer, _ = gojsonpointer.NewJsonPointer(refUrl.Fragment) + + return +} + +// Creates a new reference from a parent and a child +// If the child cannot inherit from the parent, an error is returned +func (r *JsonReference) Inherits(child JsonReference) (*JsonReference, error) { + childUrl := child.GetUrl() + parentUrl := r.GetUrl() + if childUrl == nil { + return nil, errors.New("childUrl is nil!") + } + if parentUrl == nil { + return nil, errors.New("parentUrl is nil!") + } + + ref, err := NewJsonReference(parentUrl.ResolveReference(childUrl).String()) + if err != nil { + return nil, err + } + return &ref, err +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt new file mode 100644 index 00000000..55ede8a4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/LICENSE-APACHE-2.0.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2015 xeipuuv + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/xeipuuv/gojsonschema/README.md b/vendor/github.com/xeipuuv/gojsonschema/README.md new file mode 100644 index 00000000..fe43659d --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/README.md @@ -0,0 +1,295 @@ +[![GoDoc](https://godoc.org/github.com/xeipuuv/gojsonschema?status.svg)](https://godoc.org/github.com/xeipuuv/gojsonschema) +[![Build Status](https://travis-ci.org/xeipuuv/gojsonschema.svg)](https://travis-ci.org/xeipuuv/gojsonschema) + +# gojsonschema + +## Description + +An implementation of JSON Schema, based on IETF's draft v4 - Go language + +References : + +* http://json-schema.org +* http://json-schema.org/latest/json-schema-core.html +* http://json-schema.org/latest/json-schema-validation.html + +## Installation + +``` +go get github.com/xeipuuv/gojsonschema +``` + +Dependencies : +* [github.com/xeipuuv/gojsonpointer](https://github.com/xeipuuv/gojsonpointer) +* [github.com/xeipuuv/gojsonreference](https://github.com/xeipuuv/gojsonreference) +* [github.com/stretchr/testify/assert](https://github.com/stretchr/testify#assert-package) + +## Usage + +### Example + +```go + +package main + +import ( + "fmt" + "github.com/xeipuuv/gojsonschema" +) + +func main() { + + schemaLoader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") + documentLoader := gojsonschema.NewReferenceLoader("file:///home/me/document.json") + + result, err := gojsonschema.Validate(schemaLoader, documentLoader) + if err != nil { + panic(err.Error()) + } + + if result.Valid() { + fmt.Printf("The document is valid\n") + } else { + fmt.Printf("The document is not valid. see errors :\n") + for _, desc := range result.Errors() { + fmt.Printf("- %s\n", desc) + } + } + +} + + +``` + +#### Loaders + +There are various ways to load your JSON data. +In order to load your schemas and documents, +first declare an appropriate loader : + +* Web / HTTP, using a reference : + +```go +loader := gojsonschema.NewReferenceLoader("http://www.some_host.com/schema.json") +``` + +* Local file, using a reference : + +```go +loader := gojsonschema.NewReferenceLoader("file:///home/me/schema.json") +``` + +References use the URI scheme, the prefix (file://) and a full path to the file are required. 
+ +* JSON strings : + +```go +loader := gojsonschema.NewStringLoader(`{"type": "string"}`) +``` + +* Custom Go types : + +```go +m := map[string]interface{}{"type": "string"} +loader := gojsonschema.NewGoLoader(m) +``` + +And + +```go +type Root struct { + Users []User `json:"users"` +} + +type User struct { + Name string `json:"name"` +} + +... + +data := Root{} +data.Users = append(data.Users, User{"John"}) +data.Users = append(data.Users, User{"Sophia"}) +data.Users = append(data.Users, User{"Bill"}) + +loader := gojsonschema.NewGoLoader(data) +``` + +#### Validation + +Once the loaders are set, validation is easy : + +```go +result, err := gojsonschema.Validate(schemaLoader, documentLoader) +``` + +Alternatively, you might want to load a schema only once and process to multiple validations : + +```go +schema, err := gojsonschema.NewSchema(schemaLoader) +... +result1, err := schema.Validate(documentLoader1) +... +result2, err := schema.Validate(documentLoader2) +... +// etc ... +``` + +To check the result : + +```go + if result.Valid() { + fmt.Printf("The document is valid\n") + } else { + fmt.Printf("The document is not valid. see errors :\n") + for _, err := range result.Errors() { + // Err implements the ResultError interface + fmt.Printf("- %s\n", err) + } + } +``` + +## Working with Errors + +The library handles string error codes which you can customize by creating your own gojsonschema.locale and setting it +```go +gojsonschema.Locale = YourCustomLocale{} +``` + +However, each error contains additional contextual information. + +**err.Type()**: *string* Returns the "type" of error that occurred. Note you can also type check. See below + +Note: An error of RequiredType has an err.Type() return value of "required" + + "required": RequiredError + "invalid_type": InvalidTypeError + "number_any_of": NumberAnyOfError + "number_one_of": NumberOneOfError + "number_all_of": NumberAllOfError + "number_not": NumberNotError + "missing_dependency": MissingDependencyError + "internal": InternalError + "enum": EnumError + "array_no_additional_items": ArrayNoAdditionalItemsError + "array_min_items": ArrayMinItemsError + "array_max_items": ArrayMaxItemsError + "unique": ItemsMustBeUniqueError + "array_min_properties": ArrayMinPropertiesError + "array_max_properties": ArrayMaxPropertiesError + "additional_property_not_allowed": AdditionalPropertyNotAllowedError + "invalid_property_pattern": InvalidPropertyPatternError + "string_gte": StringLengthGTEError + "string_lte": StringLengthLTEError + "pattern": DoesNotMatchPatternError + "multiple_of": MultipleOfError + "number_gte": NumberGTEError + "number_gt": NumberGTError + "number_lte": NumberLTEError + "number_lt": NumberLTError + +**err.Value()**: *interface{}* Returns the value given + +**err.Context()**: *gojsonschema.jsonContext* Returns the context. This has a String() method that will print something like this: (root).firstName + +**err.Field()**: *string* Returns the fieldname in the format firstName, or for embedded properties, person.firstName. This returns the same as the String() method on *err.Context()* but removes the (root). prefix. + +**err.Description()**: *string* The error description. This is based on the locale you are using. See the beginning of this section for overwriting the locale with a custom implementation. + +**err.Details()**: *gojsonschema.ErrorDetails* Returns a map[string]interface{} of additional error details specific to the error. For example, GTE errors will have a "min" value, LTE will have a "max" value. 
See errors.go for a full description of all the error details. Every error always contains a "field" key that holds the value of *err.Field()* + +Note in most cases, the err.Details() will be used to generate replacement strings in your locales, and not used directly. These strings follow the text/template format i.e. +``` +{{.field}} must be greater than or equal to {{.min}} +``` + +The library allows you to specify custom template functions, should you require more complex error message handling. +```go +gojsonschema.ErrorTemplateFuncs = map[string]interface{}{ + "allcaps": func(s string) string { + return strings.ToUpper(s) + }, +} +``` + +Given the above definition, you can use the custom function `"allcaps"` in your localization templates: +``` +{{allcaps .field}} must be greater than or equal to {{.min}} +``` + +The above error message would then be rendered with the `field` value in capital letters. For example: +``` +"PASSWORD must be greater than or equal to 8" +``` + +Learn more about what types of template functions you can use in `ErrorTemplateFuncs` by referring to Go's [text/template FuncMap](https://golang.org/pkg/text/template/#FuncMap) type. + +## Formats +JSON Schema allows for optional "format" property to validate instances against well-known formats. gojsonschema ships with all of the formats defined in the spec that you can use like this: +````json +{"type": "string", "format": "email"} +```` +Available formats: date-time, hostname, email, ipv4, ipv6, uri, uri-reference. + +For repetitive or more complex formats, you can create custom format checkers and add them to gojsonschema like this: + +```go +// Define the format checker +type RoleFormatChecker struct {} + +// Ensure it meets the gojsonschema.FormatChecker interface +func (f RoleFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + return strings.HasPrefix("ROLE_", asString) +} + +// Add it to the library +gojsonschema.FormatCheckers.Add("role", RoleFormatChecker{}) +```` + +Now to use in your json schema: +````json +{"type": "string", "format": "role"} +```` + +Another example would be to check if the provided integer matches an id on database: + +JSON schema: +```json +{"type": "integer", "format": "ValidUserId"} +``` + +```go +// Define the format checker +type ValidUserIdFormatChecker struct {} + +// Ensure it meets the gojsonschema.FormatChecker interface +func (f ValidUserIdFormatChecker) IsFormat(input interface{}) bool { + + asFloat64, ok := input.(float64) // Numbers are always float64 here + if ok == false { + return false + } + + // XXX + // do the magic on the database looking for the int(asFloat64) + + return true +} + +// Add it to the library +gojsonschema.FormatCheckers.Add("ValidUserId", ValidUserIdFormatChecker{}) +```` + + + +## Uses + +gojsonschema uses the following test suite : + +https://github.com/json-schema/JSON-Schema-Test-Suite diff --git a/vendor/github.com/xeipuuv/gojsonschema/errors.go b/vendor/github.com/xeipuuv/gojsonschema/errors.go new file mode 100644 index 00000000..d39f0195 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/errors.go @@ -0,0 +1,283 @@ +package gojsonschema + +import ( + "bytes" + "sync" + "text/template" +) + +var errorTemplates errorTemplate = errorTemplate{template.New("errors-new"), sync.RWMutex{}} + +// template.Template is not thread-safe for writing, so some locking is done +// sync.RWMutex is used for efficiently locking when new templates are created +type errorTemplate 
struct { + *template.Template + sync.RWMutex +} + +type ( + // RequiredError. ErrorDetails: property string + RequiredError struct { + ResultErrorFields + } + + // InvalidTypeError. ErrorDetails: expected, given + InvalidTypeError struct { + ResultErrorFields + } + + // NumberAnyOfError. ErrorDetails: - + NumberAnyOfError struct { + ResultErrorFields + } + + // NumberOneOfError. ErrorDetails: - + NumberOneOfError struct { + ResultErrorFields + } + + // NumberAllOfError. ErrorDetails: - + NumberAllOfError struct { + ResultErrorFields + } + + // NumberNotError. ErrorDetails: - + NumberNotError struct { + ResultErrorFields + } + + // MissingDependencyError. ErrorDetails: dependency + MissingDependencyError struct { + ResultErrorFields + } + + // InternalError. ErrorDetails: error + InternalError struct { + ResultErrorFields + } + + // EnumError. ErrorDetails: allowed + EnumError struct { + ResultErrorFields + } + + // ArrayNoAdditionalItemsError. ErrorDetails: - + ArrayNoAdditionalItemsError struct { + ResultErrorFields + } + + // ArrayMinItemsError. ErrorDetails: min + ArrayMinItemsError struct { + ResultErrorFields + } + + // ArrayMaxItemsError. ErrorDetails: max + ArrayMaxItemsError struct { + ResultErrorFields + } + + // ItemsMustBeUniqueError. ErrorDetails: type + ItemsMustBeUniqueError struct { + ResultErrorFields + } + + // ArrayMinPropertiesError. ErrorDetails: min + ArrayMinPropertiesError struct { + ResultErrorFields + } + + // ArrayMaxPropertiesError. ErrorDetails: max + ArrayMaxPropertiesError struct { + ResultErrorFields + } + + // AdditionalPropertyNotAllowedError. ErrorDetails: property + AdditionalPropertyNotAllowedError struct { + ResultErrorFields + } + + // InvalidPropertyPatternError. ErrorDetails: property, pattern + InvalidPropertyPatternError struct { + ResultErrorFields + } + + // StringLengthGTEError. ErrorDetails: min + StringLengthGTEError struct { + ResultErrorFields + } + + // StringLengthLTEError. ErrorDetails: max + StringLengthLTEError struct { + ResultErrorFields + } + + // DoesNotMatchPatternError. ErrorDetails: pattern + DoesNotMatchPatternError struct { + ResultErrorFields + } + + // DoesNotMatchFormatError. ErrorDetails: format + DoesNotMatchFormatError struct { + ResultErrorFields + } + + // MultipleOfError. ErrorDetails: multiple + MultipleOfError struct { + ResultErrorFields + } + + // NumberGTEError. ErrorDetails: min + NumberGTEError struct { + ResultErrorFields + } + + // NumberGTError. ErrorDetails: min + NumberGTError struct { + ResultErrorFields + } + + // NumberLTEError. ErrorDetails: max + NumberLTEError struct { + ResultErrorFields + } + + // NumberLTError. 
ErrorDetails: max + NumberLTError struct { + ResultErrorFields + } +) + +// newError takes a ResultError type and sets the type, context, description, details, value, and field +func newError(err ResultError, context *jsonContext, value interface{}, locale locale, details ErrorDetails) { + var t string + var d string + switch err.(type) { + case *RequiredError: + t = "required" + d = locale.Required() + case *InvalidTypeError: + t = "invalid_type" + d = locale.InvalidType() + case *NumberAnyOfError: + t = "number_any_of" + d = locale.NumberAnyOf() + case *NumberOneOfError: + t = "number_one_of" + d = locale.NumberOneOf() + case *NumberAllOfError: + t = "number_all_of" + d = locale.NumberAllOf() + case *NumberNotError: + t = "number_not" + d = locale.NumberNot() + case *MissingDependencyError: + t = "missing_dependency" + d = locale.MissingDependency() + case *InternalError: + t = "internal" + d = locale.Internal() + case *EnumError: + t = "enum" + d = locale.Enum() + case *ArrayNoAdditionalItemsError: + t = "array_no_additional_items" + d = locale.ArrayNoAdditionalItems() + case *ArrayMinItemsError: + t = "array_min_items" + d = locale.ArrayMinItems() + case *ArrayMaxItemsError: + t = "array_max_items" + d = locale.ArrayMaxItems() + case *ItemsMustBeUniqueError: + t = "unique" + d = locale.Unique() + case *ArrayMinPropertiesError: + t = "array_min_properties" + d = locale.ArrayMinProperties() + case *ArrayMaxPropertiesError: + t = "array_max_properties" + d = locale.ArrayMaxProperties() + case *AdditionalPropertyNotAllowedError: + t = "additional_property_not_allowed" + d = locale.AdditionalPropertyNotAllowed() + case *InvalidPropertyPatternError: + t = "invalid_property_pattern" + d = locale.InvalidPropertyPattern() + case *StringLengthGTEError: + t = "string_gte" + d = locale.StringGTE() + case *StringLengthLTEError: + t = "string_lte" + d = locale.StringLTE() + case *DoesNotMatchPatternError: + t = "pattern" + d = locale.DoesNotMatchPattern() + case *DoesNotMatchFormatError: + t = "format" + d = locale.DoesNotMatchFormat() + case *MultipleOfError: + t = "multiple_of" + d = locale.MultipleOf() + case *NumberGTEError: + t = "number_gte" + d = locale.NumberGTE() + case *NumberGTError: + t = "number_gt" + d = locale.NumberGT() + case *NumberLTEError: + t = "number_lte" + d = locale.NumberLTE() + case *NumberLTError: + t = "number_lt" + d = locale.NumberLT() + } + + err.SetType(t) + err.SetContext(context) + err.SetValue(value) + err.SetDetails(details) + details["field"] = err.Field() + + if _, exists := details["context"]; !exists && context != nil { + details["context"] = context.String() + } + + err.SetDescription(formatErrorDescription(d, details)) +} + +// formatErrorDescription takes a string in the default text/template +// format and converts it to a string with replacements. The fields come +// from the ErrorDetails struct and vary for each type of error. 
+func formatErrorDescription(s string, details ErrorDetails) string { + + var tpl *template.Template + var descrAsBuffer bytes.Buffer + var err error + + errorTemplates.RLock() + tpl = errorTemplates.Lookup(s) + errorTemplates.RUnlock() + + if tpl == nil { + errorTemplates.Lock() + tpl = errorTemplates.New(s) + + if ErrorTemplateFuncs != nil { + tpl.Funcs(ErrorTemplateFuncs) + } + + tpl, err = tpl.Parse(s) + errorTemplates.Unlock() + + if err != nil { + return err.Error() + } + } + + err = tpl.Execute(&descrAsBuffer, details) + if err != nil { + return err.Error() + } + + return descrAsBuffer.String() +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go new file mode 100644 index 00000000..c6a07923 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/format_checkers.go @@ -0,0 +1,250 @@ +package gojsonschema + +import ( + "net" + "net/url" + "regexp" + "strings" + "time" +) + +type ( + // FormatChecker is the interface all formatters added to FormatCheckerChain must implement + FormatChecker interface { + IsFormat(input interface{}) bool + } + + // FormatCheckerChain holds the formatters + FormatCheckerChain struct { + formatters map[string]FormatChecker + } + + // EmailFormatter verifies email address formats + EmailFormatChecker struct{} + + // IPV4FormatChecker verifies IP addresses in the ipv4 format + IPV4FormatChecker struct{} + + // IPV6FormatChecker verifies IP addresses in the ipv6 format + IPV6FormatChecker struct{} + + // DateTimeFormatChecker verifies date/time formats per RFC3339 5.6 + // + // Valid formats: + // Partial Time: HH:MM:SS + // Full Date: YYYY-MM-DD + // Full Time: HH:MM:SSZ-07:00 + // Date Time: YYYY-MM-DDTHH:MM:SSZ-0700 + // + // Where + // YYYY = 4DIGIT year + // MM = 2DIGIT month ; 01-12 + // DD = 2DIGIT day-month ; 01-28, 01-29, 01-30, 01-31 based on month/year + // HH = 2DIGIT hour ; 00-23 + // MM = 2DIGIT ; 00-59 + // SS = 2DIGIT ; 00-58, 00-60 based on leap second rules + // T = Literal + // Z = Literal + // + // Note: Nanoseconds are also suported in all formats + // + // http://tools.ietf.org/html/rfc3339#section-5.6 + DateTimeFormatChecker struct{} + + // URIFormatChecker validates a URI with a valid Scheme per RFC3986 + URIFormatChecker struct{} + + // URIReferenceFormatChecker validates a URI or relative-reference per RFC3986 + URIReferenceFormatChecker struct{} + + // HostnameFormatChecker validates a hostname is in the correct format + HostnameFormatChecker struct{} + + // UUIDFormatChecker validates a UUID is in the correct format + UUIDFormatChecker struct{} + + // RegexFormatChecker validates a regex is in the correct format + RegexFormatChecker struct{} +) + +var ( + // Formatters holds the valid formatters, and is a public variable + // so library users can add custom formatters + FormatCheckers = FormatCheckerChain{ + formatters: map[string]FormatChecker{ + "date-time": DateTimeFormatChecker{}, + "hostname": HostnameFormatChecker{}, + "email": EmailFormatChecker{}, + "ipv4": IPV4FormatChecker{}, + "ipv6": IPV6FormatChecker{}, + "uri": URIFormatChecker{}, + "uri-reference": URIReferenceFormatChecker{}, + "uuid": UUIDFormatChecker{}, + "regex": RegexFormatChecker{}, + }, + } + + // Regex credit: https://github.com/asaskevich/govalidator + rxEmail = 
regexp.MustCompile("^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$") + + // Regex credit: https://www.socketloop.com/tutorials/golang-validate-hostname + rxHostname = regexp.MustCompile(`^([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])(\.([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]{0,61}[a-zA-Z0-9]))*$`) + + rxUUID = regexp.MustCompile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}$") +) + +// Add adds a FormatChecker to the FormatCheckerChain +// The name used will be the value used for the format key in your json schema +func (c *FormatCheckerChain) Add(name string, f FormatChecker) *FormatCheckerChain { + c.formatters[name] = f + + return c +} + +// Remove deletes a FormatChecker from the FormatCheckerChain (if it exists) +func (c *FormatCheckerChain) Remove(name string) *FormatCheckerChain { + delete(c.formatters, name) + + return c +} + +// Has checks to see if the FormatCheckerChain holds a FormatChecker with the given name +func (c *FormatCheckerChain) Has(name string) bool { + _, ok := c.formatters[name] + + return ok +} + +// IsFormat will check an input against a FormatChecker with the given name +// to see if it is the correct format +func (c *FormatCheckerChain) IsFormat(name string, input interface{}) bool { + f, ok := c.formatters[name] + + if !ok { + return false + } + + return f.IsFormat(input) +} + +func (f EmailFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + return rxEmail.MatchString(asString) +} + +// Credit: https://github.com/asaskevich/govalidator +func (f IPV4FormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + ip := net.ParseIP(asString) + return ip != nil && strings.Contains(asString, ".") +} + +// Credit: https://github.com/asaskevich/govalidator +func (f IPV6FormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + ip := net.ParseIP(asString) + return ip != nil && strings.Contains(asString, ":") +} + +func (f DateTimeFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + formats := []string{ + "15:04:05", + "15:04:05Z07:00", + "2006-01-02", + time.RFC3339, + time.RFC3339Nano, + } + + for _, format := range formats 
{ + if _, err := time.Parse(format, asString); err == nil { + return true + } + } + + return false +} + +func (f URIFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + u, err := url.Parse(asString) + if err != nil || u.Scheme == "" { + return false + } + + return true +} + +func (f URIReferenceFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + _, err := url.Parse(asString) + return err == nil +} + +func (f HostnameFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + return rxHostname.MatchString(asString) && len(asString) < 256 +} + +func (f UUIDFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + return rxUUID.MatchString(asString) +} + +// IsFormat implements FormatChecker interface. +func (f RegexFormatChecker) IsFormat(input interface{}) bool { + + asString, ok := input.(string) + if ok == false { + return false + } + + if asString == "" { + return true + } + _, err := regexp.Compile(asString) + if err != nil { + return false + } + return true +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/internalLog.go b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go new file mode 100644 index 00000000..4ef7a8d0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/internalLog.go @@ -0,0 +1,37 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Very simple log wrapper. +// Used for debugging/testing purposes. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "log" +) + +const internalLogEnabled = false + +func internalLog(format string, v ...interface{}) { + log.Printf(format, v...) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go new file mode 100644 index 00000000..fcc8d9d6 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonContext.go @@ -0,0 +1,72 @@ +// Copyright 2013 MongoDB, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author tolsen +// author-github https://github.com/tolsen +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Implements a persistent (immutable w/ shared structure) singly-linked list of strings for the purpose of storing a json context +// +// created 04-09-2013 + +package gojsonschema + +import "bytes" + +// jsonContext implements a persistent linked-list of strings +type jsonContext struct { + head string + tail *jsonContext +} + +func newJsonContext(head string, tail *jsonContext) *jsonContext { + return &jsonContext{head, tail} +} + +// String displays the context in reverse. +// This plays well with the data structure's persistent nature with +// Cons and a json document's tree structure. +func (c *jsonContext) String(del ...string) string { + byteArr := make([]byte, 0, c.stringLen()) + buf := bytes.NewBuffer(byteArr) + c.writeStringToBuffer(buf, del) + + return buf.String() +} + +func (c *jsonContext) stringLen() int { + length := 0 + if c.tail != nil { + length = c.tail.stringLen() + 1 // add 1 for "." + } + + length += len(c.head) + return length +} + +func (c *jsonContext) writeStringToBuffer(buf *bytes.Buffer, del []string) { + if c.tail != nil { + c.tail.writeStringToBuffer(buf, del) + + if len(del) > 0 { + buf.WriteString(del[0]) + } else { + buf.WriteString(".") + } + } + + buf.WriteString(c.head) +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go new file mode 100644 index 00000000..a77a81e4 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/jsonLoader.go @@ -0,0 +1,341 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Different strategies to load JSON files. +// Includes References (file and HTTP), JSON strings and Go types. 
+// +// created 01-02-2015 + +package gojsonschema + +import ( + "bytes" + "encoding/json" + "errors" + "io" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "runtime" + "strings" + + + "github.com/xeipuuv/gojsonreference" +) + +var osFS = osFileSystem(os.Open) + +// JSON loader interface + +type JSONLoader interface { + JsonSource() interface{} + LoadJSON() (interface{}, error) + JsonReference() (gojsonreference.JsonReference, error) + LoaderFactory() JSONLoaderFactory +} + +type JSONLoaderFactory interface { + New(source string) JSONLoader +} + +type DefaultJSONLoaderFactory struct { +} + +type FileSystemJSONLoaderFactory struct { + fs http.FileSystem +} + +func (d DefaultJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +func (f FileSystemJSONLoaderFactory) New(source string) JSONLoader { + return &jsonReferenceLoader{ + fs: f.fs, + source: source, + } +} + +// osFileSystem is a functional wrapper for os.Open that implements http.FileSystem. +type osFileSystem func(string) (*os.File, error) + +func (o osFileSystem) Open(name string) (http.File, error) { + return o(name) +} + +// JSON Reference loader +// references are used to load JSONs from files and HTTP + +type jsonReferenceLoader struct { + fs http.FileSystem + source string +} + +func (l *jsonReferenceLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonReferenceLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference(l.JsonSource().(string)) +} + +func (l *jsonReferenceLoader) LoaderFactory() JSONLoaderFactory { + return &FileSystemJSONLoaderFactory{ + fs: l.fs, + } +} + +// NewReferenceLoader returns a JSON reference loader using the given source and the local OS file system. +func NewReferenceLoader(source string) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: osFS, + source: source, + } +} + +// NewReferenceLoaderFileSystem returns a JSON reference loader using the given source and file system. 
+func NewReferenceLoaderFileSystem(source string, fs http.FileSystem) *jsonReferenceLoader { + return &jsonReferenceLoader{ + fs: fs, + source: source, + } +} + +func (l *jsonReferenceLoader) LoadJSON() (interface{}, error) { + + var err error + + reference, err := gojsonreference.NewJsonReference(l.JsonSource().(string)) + if err != nil { + return nil, err + } + + refToUrl := reference + refToUrl.GetUrl().Fragment = "" + + var document interface{} + + if reference.HasFileScheme { + + filename := strings.Replace(refToUrl.GetUrl().Path, "file://", "", -1) + if runtime.GOOS == "windows" { + // on Windows, a file URL may have an extra leading slash, use slashes + // instead of backslashes, and have spaces escaped + if strings.HasPrefix(filename, "/") { + filename = filename[1:] + } + filename = filepath.FromSlash(filename) + } + + document, err = l.loadFromFile(filename) + if err != nil { + return nil, err + } + + } else { + + document, err = l.loadFromHTTP(refToUrl.String()) + if err != nil { + return nil, err + } + + } + + return document, nil + +} + +func (l *jsonReferenceLoader) loadFromHTTP(address string) (interface{}, error) { + + resp, err := http.Get(address) + if err != nil { + return nil, err + } + + // must return HTTP Status 200 OK + if resp.StatusCode != http.StatusOK { + return nil, errors.New(formatErrorDescription(Locale.HttpBadStatus(), ErrorDetails{"status": resp.Status})) + } + + bodyBuff, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) + +} + +func (l *jsonReferenceLoader) loadFromFile(path string) (interface{}, error) { + f, err := l.fs.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + + bodyBuff, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(bodyBuff)) + +} + +// JSON string loader + +type jsonStringLoader struct { + source string +} + +func (l *jsonStringLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonStringLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonStringLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func NewStringLoader(source string) *jsonStringLoader { + return &jsonStringLoader{source: source} +} + +func (l *jsonStringLoader) LoadJSON() (interface{}, error) { + + return decodeJsonUsingNumber(strings.NewReader(l.JsonSource().(string))) + +} + +// JSON bytes loader + +type jsonBytesLoader struct { + source []byte +} + +func (l *jsonBytesLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonBytesLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonBytesLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func NewBytesLoader(source []byte) *jsonBytesLoader { + return &jsonBytesLoader{source: source} +} + +func (l *jsonBytesLoader) LoadJSON() (interface{}, error) { + return decodeJsonUsingNumber(bytes.NewReader(l.JsonSource().([]byte))) +} + +// JSON Go (types) loader +// used to load JSONs from the code as maps, interface{}, structs ... 
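+//
+// An illustrative sketch, not part of the upstream sources: a plain Go value
+// can be wrapped with NewGoLoader and checked against a schema built from any
+// other loader. The Validate entry point used here is assumed to be the one
+// defined in validation.go later in this patch:
+//
+//	schema, err := NewSchema(NewStringLoader(`{"type": "object"}`))
+//	if err == nil {
+//		result, _ := schema.Validate(NewGoLoader(map[string]interface{}{"a": 1}))
+//		_ = result.Valid()
+//	}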
+ +type jsonGoLoader struct { + source interface{} +} + +func (l *jsonGoLoader) JsonSource() interface{} { + return l.source +} + +func (l *jsonGoLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonGoLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func NewGoLoader(source interface{}) *jsonGoLoader { + return &jsonGoLoader{source: source} +} + +func (l *jsonGoLoader) LoadJSON() (interface{}, error) { + + // convert it to a compliant JSON first to avoid types "mismatches" + + jsonBytes, err := json.Marshal(l.JsonSource()) + if err != nil { + return nil, err + } + + return decodeJsonUsingNumber(bytes.NewReader(jsonBytes)) + +} + +type jsonIOLoader struct { + buf *bytes.Buffer +} + +func NewReaderLoader(source io.Reader) (*jsonIOLoader, io.Reader) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.TeeReader(source, buf) +} + +func NewWriterLoader(source io.Writer) (*jsonIOLoader, io.Writer) { + buf := &bytes.Buffer{} + return &jsonIOLoader{buf: buf}, io.MultiWriter(source, buf) +} + +func (l *jsonIOLoader) JsonSource() interface{} { + return l.buf.String() +} + +func (l *jsonIOLoader) LoadJSON() (interface{}, error) { + return decodeJsonUsingNumber(l.buf) +} + +func (l *jsonIOLoader) JsonReference() (gojsonreference.JsonReference, error) { + return gojsonreference.NewJsonReference("#") +} + +func (l *jsonIOLoader) LoaderFactory() JSONLoaderFactory { + return &DefaultJSONLoaderFactory{} +} + +func decodeJsonUsingNumber(r io.Reader) (interface{}, error) { + + var document interface{} + + decoder := json.NewDecoder(r) + decoder.UseNumber() + + err := decoder.Decode(&document) + if err != nil { + return nil, err + } + + return document, nil + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/locales.go b/vendor/github.com/xeipuuv/gojsonschema/locales.go new file mode 100644 index 00000000..9446f848 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/locales.go @@ -0,0 +1,286 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const string and messages. 
+// +// created 01-01-2015 + +package gojsonschema + +type ( + // locale is an interface for defining custom error strings + locale interface { + Required() string + InvalidType() string + NumberAnyOf() string + NumberOneOf() string + NumberAllOf() string + NumberNot() string + MissingDependency() string + Internal() string + Enum() string + ArrayNotEnoughItems() string + ArrayNoAdditionalItems() string + ArrayMinItems() string + ArrayMaxItems() string + Unique() string + ArrayMinProperties() string + ArrayMaxProperties() string + AdditionalPropertyNotAllowed() string + InvalidPropertyPattern() string + StringGTE() string + StringLTE() string + DoesNotMatchPattern() string + DoesNotMatchFormat() string + MultipleOf() string + NumberGTE() string + NumberGT() string + NumberLTE() string + NumberLT() string + + // Schema validations + RegexPattern() string + GreaterThanZero() string + MustBeOfA() string + MustBeOfAn() string + CannotBeUsedWithout() string + CannotBeGT() string + MustBeOfType() string + MustBeValidRegex() string + MustBeValidFormat() string + MustBeGTEZero() string + KeyCannotBeGreaterThan() string + KeyItemsMustBeOfType() string + KeyItemsMustBeUnique() string + ReferenceMustBeCanonical() string + NotAValidType() string + Duplicated() string + HttpBadStatus() string + ParseError() string + + // ErrorFormat + ErrorFormat() string + } + + // DefaultLocale is the default locale for this package + DefaultLocale struct{} +) + +func (l DefaultLocale) Required() string { + return `{{.property}} is required` +} + +func (l DefaultLocale) InvalidType() string { + return `Invalid type. Expected: {{.expected}}, given: {{.given}}` +} + +func (l DefaultLocale) NumberAnyOf() string { + return `Must validate at least one schema (anyOf)` +} + +func (l DefaultLocale) NumberOneOf() string { + return `Must validate one and only one schema (oneOf)` +} + +func (l DefaultLocale) NumberAllOf() string { + return `Must validate all the schemas (allOf)` +} + +func (l DefaultLocale) NumberNot() string { + return `Must not validate the schema (not)` +} + +func (l DefaultLocale) MissingDependency() string { + return `Has a dependency on {{.dependency}}` +} + +func (l DefaultLocale) Internal() string { + return `Internal Error {{.error}}` +} + +func (l DefaultLocale) Enum() string { + return `{{.field}} must be one of the following: {{.allowed}}` +} + +func (l DefaultLocale) ArrayNoAdditionalItems() string { + return `No additional items allowed on array` +} + +func (l DefaultLocale) ArrayNotEnoughItems() string { + return `Not enough items on array to match positional list of schema` +} + +func (l DefaultLocale) ArrayMinItems() string { + return `Array must have at least {{.min}} items` +} + +func (l DefaultLocale) ArrayMaxItems() string { + return `Array must have at most {{.max}} items` +} + +func (l DefaultLocale) Unique() string { + return `{{.type}} items must be unique` +} + +func (l DefaultLocale) ArrayMinProperties() string { + return `Must have at least {{.min}} properties` +} + +func (l DefaultLocale) ArrayMaxProperties() string { + return `Must have at most {{.max}} properties` +} + +func (l DefaultLocale) AdditionalPropertyNotAllowed() string { + return `Additional property {{.property}} is not allowed` +} + +func (l DefaultLocale) InvalidPropertyPattern() string { + return `Property "{{.property}}" does not match pattern {{.pattern}}` +} + +func (l DefaultLocale) StringGTE() string { + return `String length must be greater than or equal to {{.min}}` +} + +func (l DefaultLocale) StringLTE() 
string { + return `String length must be less than or equal to {{.max}}` +} + +func (l DefaultLocale) DoesNotMatchPattern() string { + return `Does not match pattern '{{.pattern}}'` +} + +func (l DefaultLocale) DoesNotMatchFormat() string { + return `Does not match format '{{.format}}'` +} + +func (l DefaultLocale) MultipleOf() string { + return `Must be a multiple of {{.multiple}}` +} + +func (l DefaultLocale) NumberGTE() string { + return `Must be greater than or equal to {{.min}}` +} + +func (l DefaultLocale) NumberGT() string { + return `Must be greater than {{.min}}` +} + +func (l DefaultLocale) NumberLTE() string { + return `Must be less than or equal to {{.max}}` +} + +func (l DefaultLocale) NumberLT() string { + return `Must be less than {{.max}}` +} + +// Schema validators +func (l DefaultLocale) RegexPattern() string { + return `Invalid regex pattern '{{.pattern}}'` +} + +func (l DefaultLocale) GreaterThanZero() string { + return `{{.number}} must be strictly greater than 0` +} + +func (l DefaultLocale) MustBeOfA() string { + return `{{.x}} must be of a {{.y}}` +} + +func (l DefaultLocale) MustBeOfAn() string { + return `{{.x}} must be of an {{.y}}` +} + +func (l DefaultLocale) CannotBeUsedWithout() string { + return `{{.x}} cannot be used without {{.y}}` +} + +func (l DefaultLocale) CannotBeGT() string { + return `{{.x}} cannot be greater than {{.y}}` +} + +func (l DefaultLocale) MustBeOfType() string { + return `{{.key}} must be of type {{.type}}` +} + +func (l DefaultLocale) MustBeValidRegex() string { + return `{{.key}} must be a valid regex` +} + +func (l DefaultLocale) MustBeValidFormat() string { + return `{{.key}} must be a valid format {{.given}}` +} + +func (l DefaultLocale) MustBeGTEZero() string { + return `{{.key}} must be greater than or equal to 0` +} + +func (l DefaultLocale) KeyCannotBeGreaterThan() string { + return `{{.key}} cannot be greater than {{.y}}` +} + +func (l DefaultLocale) KeyItemsMustBeOfType() string { + return `{{.key}} items must be {{.type}}` +} + +func (l DefaultLocale) KeyItemsMustBeUnique() string { + return `{{.key}} items must be unique` +} + +func (l DefaultLocale) ReferenceMustBeCanonical() string { + return `Reference {{.reference}} must be canonical` +} + +func (l DefaultLocale) NotAValidType() string { + return `has a primitive type that is NOT VALID -- given: {{.given}} Expected valid values are:{{.expected}}` +} + +func (l DefaultLocale) Duplicated() string { + return `{{.type}} type is duplicated` +} + +func (l DefaultLocale) HttpBadStatus() string { + return `Could not read schema from HTTP, response status is {{.status}}` +} + +// Replacement options: field, description, context, value +func (l DefaultLocale) ErrorFormat() string { + return `{{.field}}: {{.description}}` +} + +//Parse error +func (l DefaultLocale) ParseError() string { + return `Expected: {{.expected}}, given: Invalid JSON` +} + +const ( + STRING_NUMBER = "number" + STRING_ARRAY_OF_STRINGS = "array of strings" + STRING_ARRAY_OF_SCHEMAS = "array of schemas" + STRING_SCHEMA = "valid schema" + STRING_SCHEMA_OR_ARRAY_OF_STRINGS = "schema or array of strings" + STRING_PROPERTIES = "properties" + STRING_DEPENDENCY = "dependency" + STRING_PROPERTY = "property" + STRING_UNDEFINED = "undefined" + STRING_CONTEXT_ROOT = "(root)" + STRING_ROOT_SCHEMA_PROPERTY = "(root)" +) diff --git a/vendor/github.com/xeipuuv/gojsonschema/result.go b/vendor/github.com/xeipuuv/gojsonschema/result.go new file mode 100644 index 00000000..6ad56ae8 --- /dev/null +++ 
b/vendor/github.com/xeipuuv/gojsonschema/result.go @@ -0,0 +1,172 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Result and ResultError implementations. +// +// created 01-01-2015 + +package gojsonschema + +import ( + "fmt" + "strings" +) + +type ( + // ErrorDetails is a map of details specific to each error. + // While the values will vary, every error will contain a "field" value + ErrorDetails map[string]interface{} + + // ResultError is the interface that library errors must implement + ResultError interface { + Field() string + SetType(string) + Type() string + SetContext(*jsonContext) + Context() *jsonContext + SetDescription(string) + Description() string + SetValue(interface{}) + Value() interface{} + SetDetails(ErrorDetails) + Details() ErrorDetails + String() string + } + + // ResultErrorFields holds the fields for each ResultError implementation. + // ResultErrorFields implements the ResultError interface, so custom errors + // can be defined by just embedding this type + ResultErrorFields struct { + errorType string // A string with the type of error (i.e. invalid_type) + context *jsonContext // Tree like notation of the part that failed the validation. ex (root).a.b ... + description string // A human readable error message + value interface{} // Value given by the JSON file that is the source of the error + details ErrorDetails + } + + Result struct { + errors []ResultError + // Scores how well the validation matched. Useful in generating + // better error messages for anyOf and oneOf. + score int + } +) + +// Field outputs the field name without the root context +// i.e. 
firstName or person.firstName instead of (root).firstName or (root).person.firstName +func (v *ResultErrorFields) Field() string { + if p, ok := v.Details()["property"]; ok { + if str, isString := p.(string); isString { + return str + } + } + + return strings.TrimPrefix(v.context.String(), STRING_ROOT_SCHEMA_PROPERTY+".") +} + +func (v *ResultErrorFields) SetType(errorType string) { + v.errorType = errorType +} + +func (v *ResultErrorFields) Type() string { + return v.errorType +} + +func (v *ResultErrorFields) SetContext(context *jsonContext) { + v.context = context +} + +func (v *ResultErrorFields) Context() *jsonContext { + return v.context +} + +func (v *ResultErrorFields) SetDescription(description string) { + v.description = description +} + +func (v *ResultErrorFields) Description() string { + return v.description +} + +func (v *ResultErrorFields) SetValue(value interface{}) { + v.value = value +} + +func (v *ResultErrorFields) Value() interface{} { + return v.value +} + +func (v *ResultErrorFields) SetDetails(details ErrorDetails) { + v.details = details +} + +func (v *ResultErrorFields) Details() ErrorDetails { + return v.details +} + +func (v ResultErrorFields) String() string { + // as a fallback, the value is displayed go style + valueString := fmt.Sprintf("%v", v.value) + + // marshal the go value value to json + if v.value == nil { + valueString = TYPE_NULL + } else { + if vs, err := marshalToJsonString(v.value); err == nil { + if vs == nil { + valueString = TYPE_NULL + } else { + valueString = *vs + } + } + } + + return formatErrorDescription(Locale.ErrorFormat(), ErrorDetails{ + "context": v.context.String(), + "description": v.description, + "value": valueString, + "field": v.Field(), + }) +} + +func (v *Result) Valid() bool { + return len(v.errors) == 0 +} + +func (v *Result) Errors() []ResultError { + return v.errors +} + +func (v *Result) addError(err ResultError, context *jsonContext, value interface{}, details ErrorDetails) { + newError(err, context, value, Locale, details) + v.errors = append(v.errors, err) + v.score -= 2 // results in a net -1 when added to the +1 we get at the end of the validation function +} + +// Used to copy errors from a sub-schema to the main one +func (v *Result) mergeErrors(otherResult *Result) { + v.errors = append(v.errors, otherResult.Errors()...) + v.score += otherResult.score +} + +func (v *Result) incrementScore() { + v.score++ +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schema.go b/vendor/github.com/xeipuuv/gojsonschema/schema.go new file mode 100644 index 00000000..f1fbde38 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schema.go @@ -0,0 +1,971 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. 
+// +// description Defines Schema, the main entry to every subSchema. +// Contains the parsing logic and error checking. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + "reflect" + "regexp" + "text/template" + + "github.com/xeipuuv/gojsonreference" +) + +var ( + // Locale is the default locale to use + // Library users can overwrite with their own implementation + Locale locale = DefaultLocale{} + + // ErrorTemplateFuncs allows you to define custom template funcs for use in localization. + ErrorTemplateFuncs template.FuncMap +) + +func NewSchema(l JSONLoader) (*Schema, error) { + ref, err := l.JsonReference() + if err != nil { + return nil, err + } + + d := Schema{} + d.pool = newSchemaPool(l.LoaderFactory()) + d.documentReference = ref + d.referencePool = newSchemaReferencePool() + + var spd *schemaPoolDocument + var doc interface{} + if ref.String() != "" { + // Get document from schema pool + spd, err = d.pool.GetDocument(d.documentReference) + if err != nil { + return nil, err + } + doc = spd.Document + + // Deal with fragment pointers + jsonPointer := ref.GetPointer() + doc, _, err = jsonPointer.Get(doc) + if err != nil { + return nil, err + } + } else { + // Load JSON directly + doc, err = l.LoadJSON() + if err != nil { + return nil, err + } + } + d.pool.SetStandaloneDocument(doc) + + err = d.parse(doc) + if err != nil { + return nil, err + } + + return &d, nil +} + +type Schema struct { + documentReference gojsonreference.JsonReference + rootSchema *subSchema + pool *schemaPool + referencePool *schemaReferencePool +} + +func (d *Schema) parse(document interface{}) error { + d.rootSchema = &subSchema{property: STRING_ROOT_SCHEMA_PROPERTY} + return d.parseSchema(document, d.rootSchema) +} + +func (d *Schema) SetRootSchemaName(name string) { + d.rootSchema.property = name +} + +// Parses a subSchema +// +// Pretty long function ( sorry :) )... but pretty straight forward, repetitive and boring +// Not much magic involved here, most of the job is to validate the key names and their values, +// then the values are copied into subSchema struct +// +func (d *Schema) parseSchema(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.ParseError(), + ErrorDetails{ + "expected": STRING_SCHEMA, + }, + )) + } + if currentSchema.parent == nil { + currentSchema.ref = &d.documentReference + currentSchema.id = &d.documentReference + } + + if currentSchema.id == nil && currentSchema.parent != nil { + currentSchema.id = currentSchema.parent.id + } + + m := documentNode.(map[string]interface{}) + + // id + if existsMapKey(m, KEY_ID) && !isKind(m[KEY_ID], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_ID, + }, + )) + } + if k, ok := m[KEY_ID].(string); ok { + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + if currentSchema == d.rootSchema { + currentSchema.id = &jsonReference + } else { + ref, err := currentSchema.parent.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.id = ref + } + } + + // Add schema to document cache. The same id is passed down to subsequent + // subschemas, but as only the first and top one is used it will always reference + // the correct schema. Doing it once here prevents having + // to do this same step at every corner case. 
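+	// The pool key is the resolved id string; the $ref handling below queries
+	// the pool with the same resolved reference string, so references to an id
+	// registered here are served from the pool instead of being re-parsed.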
+ d.referencePool.Add(currentSchema.id.String(), currentSchema) + + // $subSchema + if existsMapKey(m, KEY_SCHEMA) { + if !isKind(m[KEY_SCHEMA], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_SCHEMA, + }, + )) + } + schemaRef := m[KEY_SCHEMA].(string) + schemaReference, err := gojsonreference.NewJsonReference(schemaRef) + currentSchema.subSchema = &schemaReference + if err != nil { + return err + } + } + + // $ref + if existsMapKey(m, KEY_REF) && !isKind(m[KEY_REF], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_REF, + }, + )) + } + if k, ok := m[KEY_REF].(string); ok { + + jsonReference, err := gojsonreference.NewJsonReference(k) + if err != nil { + return err + } + + if jsonReference.HasFullUrl { + currentSchema.ref = &jsonReference + } else { + inheritedReference, err := currentSchema.id.Inherits(jsonReference) + if err != nil { + return err + } + currentSchema.ref = inheritedReference + } + if sch, ok := d.referencePool.Get(currentSchema.ref.String()); ok { + currentSchema.refSchema = sch + } else { + err := d.parseReference(documentNode, currentSchema) + + if err != nil { + return err + } + + return nil + } + } + + // definitions + if existsMapKey(m, KEY_DEFINITIONS) { + if isKind(m[KEY_DEFINITIONS], reflect.Map) { + currentSchema.definitions = make(map[string]*subSchema) + for dk, dv := range m[KEY_DEFINITIONS].(map[string]interface{}) { + if isKind(dv, reflect.Map) { + + ref, err := gojsonreference.NewJsonReference("#/" + KEY_DEFINITIONS + "/" + dk) + if err != nil { + return err + } + + newSchemaID, err := currentSchema.id.Inherits(ref) + if err != nil { + return err + } + newSchema := &subSchema{property: KEY_DEFINITIONS, parent: currentSchema, id: newSchemaID} + currentSchema.definitions[dk] = newSchema + + err = d.parseSchema(dv, newSchema) + + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_ARRAY_OF_SCHEMAS, + "given": KEY_DEFINITIONS, + }, + )) + } + + } + + // title + if existsMapKey(m, KEY_TITLE) && !isKind(m[KEY_TITLE], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_TITLE, + }, + )) + } + if k, ok := m[KEY_TITLE].(string); ok { + currentSchema.title = &k + } + + // description + if existsMapKey(m, KEY_DESCRIPTION) && !isKind(m[KEY_DESCRIPTION], reflect.String) { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING, + "given": KEY_DESCRIPTION, + }, + )) + } + if k, ok := m[KEY_DESCRIPTION].(string); ok { + currentSchema.description = &k + } + + // type + if existsMapKey(m, KEY_TYPE) { + if isKind(m[KEY_TYPE], reflect.String) { + if k, ok := m[KEY_TYPE].(string); ok { + err := currentSchema.types.Add(k) + if err != nil { + return err + } + } + } else { + if isKind(m[KEY_TYPE], reflect.Slice) { + arrayOfTypes := m[KEY_TYPE].([]interface{}) + for _, typeInArray := range arrayOfTypes { + if reflect.ValueOf(typeInArray).Kind() != reflect.String { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + 
STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } else { + currentSchema.types.Add(typeInArray.(string)) + } + } + + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_STRING + "/" + STRING_ARRAY_OF_STRINGS, + "given": KEY_TYPE, + }, + )) + } + } + } + + // properties + if existsMapKey(m, KEY_PROPERTIES) { + err := d.parseProperties(m[KEY_PROPERTIES], currentSchema) + if err != nil { + return err + } + } + + // additionalProperties + if existsMapKey(m, KEY_ADDITIONAL_PROPERTIES) { + if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Bool) { + currentSchema.additionalProperties = m[KEY_ADDITIONAL_PROPERTIES].(bool) + } else if isKind(m[KEY_ADDITIONAL_PROPERTIES], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_PROPERTIES, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalProperties = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_PROPERTIES], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_PROPERTIES, + }, + )) + } + } + + // patternProperties + if existsMapKey(m, KEY_PATTERN_PROPERTIES) { + if isKind(m[KEY_PATTERN_PROPERTIES], reflect.Map) { + patternPropertiesMap := m[KEY_PATTERN_PROPERTIES].(map[string]interface{}) + if len(patternPropertiesMap) > 0 { + currentSchema.patternProperties = make(map[string]*subSchema) + for k, v := range patternPropertiesMap { + _, err := regexp.MatchString(k, "") + if err != nil { + return errors.New(formatErrorDescription( + Locale.RegexPattern(), + ErrorDetails{"pattern": k}, + )) + } + newSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err = d.parseSchema(v, newSchema) + if err != nil { + return errors.New(err.Error()) + } + currentSchema.patternProperties[k] = newSchema + } + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA, + "given": KEY_PATTERN_PROPERTIES, + }, + )) + } + } + + // dependencies + if existsMapKey(m, KEY_DEPENDENCIES) { + err := d.parseDependencies(m[KEY_DEPENDENCIES], currentSchema) + if err != nil { + return err + } + } + + // items + if existsMapKey(m, KEY_ITEMS) { + if isKind(m[KEY_ITEMS], reflect.Slice) { + for _, itemElement := range m[KEY_ITEMS].([]interface{}) { + if isKind(itemElement, reflect.Map) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.AddItemsChild(newSchema) + err := d.parseSchema(itemElement, newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + currentSchema.itemsChildrenIsSingleSchema = false + } + } else if isKind(m[KEY_ITEMS], reflect.Map) { + newSchema := &subSchema{parent: currentSchema, property: KEY_ITEMS} + newSchema.ref = currentSchema.ref + currentSchema.AddItemsChild(newSchema) + err := d.parseSchema(m[KEY_ITEMS], newSchema) + if err != nil { + return err + } + currentSchema.itemsChildrenIsSingleSchema = true + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_SCHEMA + "/" + STRING_ARRAY_OF_SCHEMAS, + "given": KEY_ITEMS, + }, + )) + } + } + + // additionalItems + if 
existsMapKey(m, KEY_ADDITIONAL_ITEMS) { + if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Bool) { + currentSchema.additionalItems = m[KEY_ADDITIONAL_ITEMS].(bool) + } else if isKind(m[KEY_ADDITIONAL_ITEMS], reflect.Map) { + newSchema := &subSchema{property: KEY_ADDITIONAL_ITEMS, parent: currentSchema, ref: currentSchema.ref} + currentSchema.additionalItems = newSchema + err := d.parseSchema(m[KEY_ADDITIONAL_ITEMS], newSchema) + if err != nil { + return errors.New(err.Error()) + } + } else { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": TYPE_BOOLEAN + "/" + STRING_SCHEMA, + "given": KEY_ADDITIONAL_ITEMS, + }, + )) + } + } + + // validation : number / integer + + if existsMapKey(m, KEY_MULTIPLE_OF) { + multipleOfValue := mustBeNumber(m[KEY_MULTIPLE_OF]) + if multipleOfValue == nil { + return errors.New(formatErrorDescription( + Locale.InvalidType(), + ErrorDetails{ + "expected": STRING_NUMBER, + "given": KEY_MULTIPLE_OF, + }, + )) + } + if *multipleOfValue <= 0 { + return errors.New(formatErrorDescription( + Locale.GreaterThanZero(), + ErrorDetails{"number": KEY_MULTIPLE_OF}, + )) + } + currentSchema.multipleOf = multipleOfValue + } + + if existsMapKey(m, KEY_MINIMUM) { + minimumValue := mustBeNumber(m[KEY_MINIMUM]) + if minimumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MINIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.minimum = minimumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MINIMUM) { + if isKind(m[KEY_EXCLUSIVE_MINIMUM], reflect.Bool) { + if currentSchema.minimum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": KEY_MINIMUM}, + )) + } + exclusiveMinimumValue := m[KEY_EXCLUSIVE_MINIMUM].(bool) + currentSchema.exclusiveMinimum = exclusiveMinimumValue + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_EXCLUSIVE_MINIMUM, "y": TYPE_BOOLEAN}, + )) + } + } + + if existsMapKey(m, KEY_MAXIMUM) { + maximumValue := mustBeNumber(m[KEY_MAXIMUM]) + if maximumValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_MAXIMUM, "y": STRING_NUMBER}, + )) + } + currentSchema.maximum = maximumValue + } + + if existsMapKey(m, KEY_EXCLUSIVE_MAXIMUM) { + if isKind(m[KEY_EXCLUSIVE_MAXIMUM], reflect.Bool) { + if currentSchema.maximum == nil { + return errors.New(formatErrorDescription( + Locale.CannotBeUsedWithout(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": KEY_MAXIMUM}, + )) + } + exclusiveMaximumValue := m[KEY_EXCLUSIVE_MAXIMUM].(bool) + currentSchema.exclusiveMaximum = exclusiveMaximumValue + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_EXCLUSIVE_MAXIMUM, "y": STRING_NUMBER}, + )) + } + } + + if currentSchema.minimum != nil && currentSchema.maximum != nil { + if *currentSchema.minimum > *currentSchema.maximum { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MINIMUM, "y": KEY_MAXIMUM}, + )) + } + } + + // validation : string + + if existsMapKey(m, KEY_MIN_LENGTH) { + minLengthIntegerValue := mustBeInteger(m[KEY_MIN_LENGTH]) + if minLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *minLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": 
KEY_MIN_LENGTH}, + )) + } + currentSchema.minLength = minLengthIntegerValue + } + + if existsMapKey(m, KEY_MAX_LENGTH) { + maxLengthIntegerValue := mustBeInteger(m[KEY_MAX_LENGTH]) + if maxLengthIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_LENGTH, "y": TYPE_INTEGER}, + )) + } + if *maxLengthIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_LENGTH}, + )) + } + currentSchema.maxLength = maxLengthIntegerValue + } + + if currentSchema.minLength != nil && currentSchema.maxLength != nil { + if *currentSchema.minLength > *currentSchema.maxLength { + return errors.New(formatErrorDescription( + Locale.CannotBeGT(), + ErrorDetails{"x": KEY_MIN_LENGTH, "y": KEY_MAX_LENGTH}, + )) + } + } + + if existsMapKey(m, KEY_PATTERN) { + if isKind(m[KEY_PATTERN], reflect.String) { + regexpObject, err := regexp.Compile(m[KEY_PATTERN].(string)) + if err != nil { + return errors.New(formatErrorDescription( + Locale.MustBeValidRegex(), + ErrorDetails{"key": KEY_PATTERN}, + )) + } + currentSchema.pattern = regexpObject + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_PATTERN, "y": TYPE_STRING}, + )) + } + } + + if existsMapKey(m, KEY_FORMAT) { + formatString, ok := m[KEY_FORMAT].(string) + if ok && FormatCheckers.Has(formatString) { + currentSchema.format = formatString + } + } + + // validation : object + + if existsMapKey(m, KEY_MIN_PROPERTIES) { + minPropertiesIntegerValue := mustBeInteger(m[KEY_MIN_PROPERTIES]) + if minPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *minPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_PROPERTIES}, + )) + } + currentSchema.minProperties = minPropertiesIntegerValue + } + + if existsMapKey(m, KEY_MAX_PROPERTIES) { + maxPropertiesIntegerValue := mustBeInteger(m[KEY_MAX_PROPERTIES]) + if maxPropertiesIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_PROPERTIES, "y": TYPE_INTEGER}, + )) + } + if *maxPropertiesIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_PROPERTIES}, + )) + } + currentSchema.maxProperties = maxPropertiesIntegerValue + } + + if currentSchema.minProperties != nil && currentSchema.maxProperties != nil { + if *currentSchema.minProperties > *currentSchema.maxProperties { + return errors.New(formatErrorDescription( + Locale.KeyCannotBeGreaterThan(), + ErrorDetails{"key": KEY_MIN_PROPERTIES, "y": KEY_MAX_PROPERTIES}, + )) + } + } + + if existsMapKey(m, KEY_REQUIRED) { + if isKind(m[KEY_REQUIRED], reflect.Slice) { + requiredValues := m[KEY_REQUIRED].([]interface{}) + for _, requiredValue := range requiredValues { + if isKind(requiredValue, reflect.String) { + err := currentSchema.AddRequired(requiredValue.(string)) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeOfType(), + ErrorDetails{"key": KEY_REQUIRED, "type": TYPE_STRING}, + )) + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_REQUIRED, "y": TYPE_ARRAY}, + )) + } + } + + // validation : array + + if existsMapKey(m, KEY_MIN_ITEMS) { + minItemsIntegerValue := 
mustBeInteger(m[KEY_MIN_ITEMS]) + if minItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MIN_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *minItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MIN_ITEMS}, + )) + } + currentSchema.minItems = minItemsIntegerValue + } + + if existsMapKey(m, KEY_MAX_ITEMS) { + maxItemsIntegerValue := mustBeInteger(m[KEY_MAX_ITEMS]) + if maxItemsIntegerValue == nil { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_MAX_ITEMS, "y": TYPE_INTEGER}, + )) + } + if *maxItemsIntegerValue < 0 { + return errors.New(formatErrorDescription( + Locale.MustBeGTEZero(), + ErrorDetails{"key": KEY_MAX_ITEMS}, + )) + } + currentSchema.maxItems = maxItemsIntegerValue + } + + if existsMapKey(m, KEY_UNIQUE_ITEMS) { + if isKind(m[KEY_UNIQUE_ITEMS], reflect.Bool) { + currentSchema.uniqueItems = m[KEY_UNIQUE_ITEMS].(bool) + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfA(), + ErrorDetails{"x": KEY_UNIQUE_ITEMS, "y": TYPE_BOOLEAN}, + )) + } + } + + // validation : all + + if existsMapKey(m, KEY_ENUM) { + if isKind(m[KEY_ENUM], reflect.Slice) { + for _, v := range m[KEY_ENUM].([]interface{}) { + err := currentSchema.AddEnum(v) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ENUM, "y": TYPE_ARRAY}, + )) + } + } + + // validation : subSchema + + if existsMapKey(m, KEY_ONE_OF) { + if isKind(m[KEY_ONE_OF], reflect.Slice) { + for _, v := range m[KEY_ONE_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ONE_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddOneOf(newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ONE_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ANY_OF) { + if isKind(m[KEY_ANY_OF], reflect.Slice) { + for _, v := range m[KEY_ANY_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ANY_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddAnyOf(newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_ALL_OF) { + if isKind(m[KEY_ALL_OF], reflect.Slice) { + for _, v := range m[KEY_ALL_OF].([]interface{}) { + newSchema := &subSchema{property: KEY_ALL_OF, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddAllOf(newSchema) + err := d.parseSchema(v, newSchema) + if err != nil { + return err + } + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_ANY_OF, "y": TYPE_ARRAY}, + )) + } + } + + if existsMapKey(m, KEY_NOT) { + if isKind(m[KEY_NOT], reflect.Map) { + newSchema := &subSchema{property: KEY_NOT, parent: currentSchema, ref: currentSchema.ref} + currentSchema.SetNot(newSchema) + err := d.parseSchema(m[KEY_NOT], newSchema) + if err != nil { + return err + } + } else { + return errors.New(formatErrorDescription( + Locale.MustBeOfAn(), + ErrorDetails{"x": KEY_NOT, "y": TYPE_OBJECT}, + )) + } + } + + return nil +} + +func (d *Schema) parseReference(documentNode interface{}, currentSchema *subSchema) error { + var ( + 
refdDocumentNode interface{} + dsp *schemaPoolDocument + err error + ) + jsonPointer := currentSchema.ref.GetPointer() + standaloneDocument := d.pool.GetStandaloneDocument() + + newSchema := &subSchema{property: KEY_REF, parent: currentSchema, ref: currentSchema.ref} + + if currentSchema.ref.HasFragmentOnly { + refdDocumentNode, _, err = jsonPointer.Get(standaloneDocument) + if err != nil { + return err + } + + } else { + dsp, err = d.pool.GetDocument(*currentSchema.ref) + if err != nil { + return err + } + newSchema.id = currentSchema.ref + + refdDocumentNode, _, err = jsonPointer.Get(dsp.Document) + + if err != nil { + return err + } + + } + + if !isKind(refdDocumentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_SCHEMA, "type": TYPE_OBJECT}, + )) + } + + // returns the loaded referenced subSchema for the caller to update its current subSchema + newSchemaDocument := refdDocumentNode.(map[string]interface{}) + + err = d.parseSchema(newSchemaDocument, newSchema) + if err != nil { + return err + } + + currentSchema.refSchema = newSchema + + return nil + +} + +func (d *Schema) parseProperties(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": STRING_PROPERTIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + for k := range m { + schemaProperty := k + newSchema := &subSchema{property: schemaProperty, parent: currentSchema, ref: currentSchema.ref} + currentSchema.AddPropertiesChild(newSchema) + err := d.parseSchema(m[k], newSchema) + if err != nil { + return err + } + } + + return nil +} + +func (d *Schema) parseDependencies(documentNode interface{}, currentSchema *subSchema) error { + + if !isKind(documentNode, reflect.Map) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{"key": KEY_DEPENDENCIES, "type": TYPE_OBJECT}, + )) + } + + m := documentNode.(map[string]interface{}) + currentSchema.dependencies = make(map[string]interface{}) + + for k := range m { + switch reflect.ValueOf(m[k]).Kind() { + + case reflect.Slice: + values := m[k].([]interface{}) + var valuesToRegister []string + + for _, value := range values { + if !isKind(value, reflect.String) { + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } else { + valuesToRegister = append(valuesToRegister, value.(string)) + } + currentSchema.dependencies[k] = valuesToRegister + } + + case reflect.Map: + depSchema := &subSchema{property: k, parent: currentSchema, ref: currentSchema.ref} + err := d.parseSchema(m[k], depSchema) + if err != nil { + return err + } + currentSchema.dependencies[k] = depSchema + + default: + return errors.New(formatErrorDescription( + Locale.MustBeOfType(), + ErrorDetails{ + "key": STRING_DEPENDENCY, + "type": STRING_SCHEMA_OR_ARRAY_OF_STRINGS, + }, + )) + } + + } + + return nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go new file mode 100644 index 00000000..ff9715f0 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaPool.go @@ -0,0 +1,103 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the 
License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines resources pooling. +// Eases referencing and avoids downloading the same resource twice. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "errors" + + "github.com/xeipuuv/gojsonreference" +) + +type schemaPoolDocument struct { + Document interface{} +} + +type schemaPool struct { + schemaPoolDocuments map[string]*schemaPoolDocument + standaloneDocument interface{} + jsonLoaderFactory JSONLoaderFactory +} + +func newSchemaPool(f JSONLoaderFactory) *schemaPool { + + p := &schemaPool{} + p.schemaPoolDocuments = make(map[string]*schemaPoolDocument) + p.standaloneDocument = nil + p.jsonLoaderFactory = f + + return p +} + +func (p *schemaPool) SetStandaloneDocument(document interface{}) { + p.standaloneDocument = document +} + +func (p *schemaPool) GetStandaloneDocument() (document interface{}) { + return p.standaloneDocument +} + +func (p *schemaPool) GetDocument(reference gojsonreference.JsonReference) (*schemaPoolDocument, error) { + + var ( + spd *schemaPoolDocument + ok bool + err error + ) + + if internalLogEnabled { + internalLog("Get Document ( %s )", reference.String()) + } + + // It is not possible to load anything that is not canonical... + if !reference.IsCanonical() { + return nil, errors.New(formatErrorDescription( + Locale.ReferenceMustBeCanonical(), + ErrorDetails{"reference": reference}, + )) + } + refToUrl := reference + refToUrl.GetUrl().Fragment = "" + + if spd, ok = p.schemaPoolDocuments[refToUrl.String()]; ok { + if internalLogEnabled { + internalLog(" From pool") + } + return spd, nil + } + + jsonReferenceLoader := p.jsonLoaderFactory.New(reference.String()) + document, err := jsonReferenceLoader.LoadJSON() + if err != nil { + return nil, err + } + + spd = &schemaPoolDocument{Document: document} + // add the document to the pool for potential later use + p.schemaPoolDocuments[refToUrl.String()] = spd + + return spd, nil +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go new file mode 100644 index 00000000..6e5e1b5c --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaReferencePool.go @@ -0,0 +1,68 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Pool of referenced schemas. +// +// created 25-06-2013 + +package gojsonschema + +import ( + "fmt" +) + +type schemaReferencePool struct { + documents map[string]*subSchema +} + +func newSchemaReferencePool() *schemaReferencePool { + + p := &schemaReferencePool{} + p.documents = make(map[string]*subSchema) + + return p +} + +func (p *schemaReferencePool) Get(ref string) (r *subSchema, o bool) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Schema Reference ( %s )", ref)) + } + + if sch, ok := p.documents[ref]; ok { + if internalLogEnabled { + internalLog(fmt.Sprintf(" From pool")) + } + return sch, true + } + + return nil, false +} + +func (p *schemaReferencePool) Add(ref string, sch *subSchema) { + + if internalLogEnabled { + internalLog(fmt.Sprintf("Add Schema Reference %s to pool", ref)) + } + if _, ok := p.documents[ref]; !ok { + p.documents[ref] = sch + } +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/schemaType.go b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go new file mode 100644 index 00000000..36b447a2 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/schemaType.go @@ -0,0 +1,83 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Helper structure to handle schema types, and the combination of them. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "errors" + "fmt" + "strings" +) + +type jsonSchemaType struct { + types []string +} + +// Is the schema typed ? that is containing at least one type +// When not typed, the schema does not need any type validation +func (t *jsonSchemaType) IsTyped() bool { + return len(t.types) > 0 +} + +func (t *jsonSchemaType) Add(etype string) error { + + if !isStringInSlice(JSON_TYPES, etype) { + return errors.New(formatErrorDescription(Locale.NotAValidType(), ErrorDetails{"given": "/" + etype + "/", "expected": JSON_TYPES})) + } + + if t.Contains(etype) { + return errors.New(formatErrorDescription(Locale.Duplicated(), ErrorDetails{"type": etype})) + } + + t.types = append(t.types, etype) + + return nil +} + +func (t *jsonSchemaType) Contains(etype string) bool { + + for _, v := range t.types { + if v == etype { + return true + } + } + + return false +} + +func (t *jsonSchemaType) String() string { + + if len(t.types) == 0 { + return STRING_UNDEFINED // should never happen + } + + // Displayed as a list [type1,type2,...] 
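+	// e.g. two registered types render as "[string,integer]"; a single type
+	// is returned bare (see below).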
+ if len(t.types) > 1 { + return fmt.Sprintf("[%s]", strings.Join(t.types, ",")) + } + + // Only one type: name only + return t.types[0] +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/subSchema.go b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go new file mode 100644 index 00000000..9961d92b --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/subSchema.go @@ -0,0 +1,227 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Defines the structure of a sub-subSchema. +// A sub-subSchema can contain other sub-schemas. +// +// created 27-02-2013 + +package gojsonschema + +import ( + "errors" + "regexp" + "strings" + + "github.com/xeipuuv/gojsonreference" +) + +const ( + KEY_SCHEMA = "$subSchema" + KEY_ID = "id" + KEY_REF = "$ref" + KEY_TITLE = "title" + KEY_DESCRIPTION = "description" + KEY_TYPE = "type" + KEY_ITEMS = "items" + KEY_ADDITIONAL_ITEMS = "additionalItems" + KEY_PROPERTIES = "properties" + KEY_PATTERN_PROPERTIES = "patternProperties" + KEY_ADDITIONAL_PROPERTIES = "additionalProperties" + KEY_DEFINITIONS = "definitions" + KEY_MULTIPLE_OF = "multipleOf" + KEY_MINIMUM = "minimum" + KEY_MAXIMUM = "maximum" + KEY_EXCLUSIVE_MINIMUM = "exclusiveMinimum" + KEY_EXCLUSIVE_MAXIMUM = "exclusiveMaximum" + KEY_MIN_LENGTH = "minLength" + KEY_MAX_LENGTH = "maxLength" + KEY_PATTERN = "pattern" + KEY_FORMAT = "format" + KEY_MIN_PROPERTIES = "minProperties" + KEY_MAX_PROPERTIES = "maxProperties" + KEY_DEPENDENCIES = "dependencies" + KEY_REQUIRED = "required" + KEY_MIN_ITEMS = "minItems" + KEY_MAX_ITEMS = "maxItems" + KEY_UNIQUE_ITEMS = "uniqueItems" + KEY_ENUM = "enum" + KEY_ONE_OF = "oneOf" + KEY_ANY_OF = "anyOf" + KEY_ALL_OF = "allOf" + KEY_NOT = "not" +) + +type subSchema struct { + + // basic subSchema meta properties + id *gojsonreference.JsonReference + title *string + description *string + + property string + + // Types associated with the subSchema + types jsonSchemaType + + // Reference url + ref *gojsonreference.JsonReference + // Schema referenced + refSchema *subSchema + // Json reference + subSchema *gojsonreference.JsonReference + + // hierarchy + parent *subSchema + definitions map[string]*subSchema + definitionsChildren []*subSchema + itemsChildren []*subSchema + itemsChildrenIsSingleSchema bool + propertiesChildren []*subSchema + + // validation : number / integer + multipleOf *float64 + maximum *float64 + exclusiveMaximum bool + minimum *float64 + exclusiveMinimum bool + + // validation : string + minLength *int + maxLength *int + pattern *regexp.Regexp + format string + + // validation : object + minProperties *int + maxProperties *int + required []string + + dependencies map[string]interface{} + additionalProperties interface{} + patternProperties 
map[string]*subSchema + + // validation : array + minItems *int + maxItems *int + uniqueItems bool + + additionalItems interface{} + + // validation : all + enum []string + + // validation : subSchema + oneOf []*subSchema + anyOf []*subSchema + allOf []*subSchema + not *subSchema +} + +func (s *subSchema) AddEnum(i interface{}) error { + + is, err := marshalToJsonString(i) + if err != nil { + return err + } + + if isStringInSlice(s.enum, *is) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_ENUM}, + )) + } + + s.enum = append(s.enum, *is) + + return nil +} + +func (s *subSchema) ContainsEnum(i interface{}) (bool, error) { + + is, err := marshalToJsonString(i) + if err != nil { + return false, err + } + + return isStringInSlice(s.enum, *is), nil +} + +func (s *subSchema) AddOneOf(subSchema *subSchema) { + s.oneOf = append(s.oneOf, subSchema) +} + +func (s *subSchema) AddAllOf(subSchema *subSchema) { + s.allOf = append(s.allOf, subSchema) +} + +func (s *subSchema) AddAnyOf(subSchema *subSchema) { + s.anyOf = append(s.anyOf, subSchema) +} + +func (s *subSchema) SetNot(subSchema *subSchema) { + s.not = subSchema +} + +func (s *subSchema) AddRequired(value string) error { + + if isStringInSlice(s.required, value) { + return errors.New(formatErrorDescription( + Locale.KeyItemsMustBeUnique(), + ErrorDetails{"key": KEY_REQUIRED}, + )) + } + + s.required = append(s.required, value) + + return nil +} + +func (s *subSchema) AddDefinitionChild(child *subSchema) { + s.definitionsChildren = append(s.definitionsChildren, child) +} + +func (s *subSchema) AddItemsChild(child *subSchema) { + s.itemsChildren = append(s.itemsChildren, child) +} + +func (s *subSchema) AddPropertiesChild(child *subSchema) { + s.propertiesChildren = append(s.propertiesChildren, child) +} + +func (s *subSchema) PatternPropertiesString() string { + + if s.patternProperties == nil || len(s.patternProperties) == 0 { + return STRING_UNDEFINED // should never happen + } + + patternPropertiesKeySlice := []string{} + for pk := range s.patternProperties { + patternPropertiesKeySlice = append(patternPropertiesKeySlice, `"`+pk+`"`) + } + + if len(patternPropertiesKeySlice) == 1 { + return patternPropertiesKeySlice[0] + } + + return "[" + strings.Join(patternPropertiesKeySlice, ",") + "]" + +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/types.go b/vendor/github.com/xeipuuv/gojsonschema/types.go new file mode 100644 index 00000000..952d22ef --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/types.go @@ -0,0 +1,58 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Contains const types for schema and JSON. 
+// +// created 28-02-2013 + +package gojsonschema + +const ( + TYPE_ARRAY = `array` + TYPE_BOOLEAN = `boolean` + TYPE_INTEGER = `integer` + TYPE_NUMBER = `number` + TYPE_NULL = `null` + TYPE_OBJECT = `object` + TYPE_STRING = `string` +) + +var JSON_TYPES []string +var SCHEMA_TYPES []string + +func init() { + JSON_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_NULL, + TYPE_OBJECT, + TYPE_STRING} + + SCHEMA_TYPES = []string{ + TYPE_ARRAY, + TYPE_BOOLEAN, + TYPE_INTEGER, + TYPE_NUMBER, + TYPE_OBJECT, + TYPE_STRING} +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/utils.go b/vendor/github.com/xeipuuv/gojsonschema/utils.go new file mode 100644 index 00000000..26cf75eb --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/utils.go @@ -0,0 +1,208 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Various utility functions. +// +// created 26-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "fmt" + "math" + "reflect" + "strconv" +) + +func isKind(what interface{}, kind reflect.Kind) bool { + target := what + if isJsonNumber(what) { + // JSON Numbers are strings! 
+ target = *mustBeNumber(what) + } + return reflect.ValueOf(target).Kind() == kind +} + +func existsMapKey(m map[string]interface{}, k string) bool { + _, ok := m[k] + return ok +} + +func isStringInSlice(s []string, what string) bool { + for i := range s { + if s[i] == what { + return true + } + } + return false +} + +func marshalToJsonString(value interface{}) (*string, error) { + + mBytes, err := json.Marshal(value) + if err != nil { + return nil, err + } + + sBytes := string(mBytes) + return &sBytes, nil +} + +func isJsonNumber(what interface{}) bool { + + switch what.(type) { + + case json.Number: + return true + } + + return false +} + +func checkJsonNumber(what interface{}) (isValidFloat64 bool, isValidInt64 bool, isValidInt32 bool) { + + jsonNumber := what.(json.Number) + + f64, errFloat64 := jsonNumber.Float64() + s64 := strconv.FormatFloat(f64, 'f', -1, 64) + _, errInt64 := strconv.ParseInt(s64, 10, 64) + + isValidFloat64 = errFloat64 == nil + isValidInt64 = errInt64 == nil + + _, errInt32 := strconv.ParseInt(s64, 10, 32) + isValidInt32 = isValidInt64 && errInt32 == nil + + return + +} + +// same as ECMA Number.MAX_SAFE_INTEGER and Number.MIN_SAFE_INTEGER +const ( + max_json_float = float64(1<<53 - 1) // 9007199254740991.0 2^53 - 1 + min_json_float = -float64(1<<53 - 1) //-9007199254740991.0 -2^53 - 1 +) + +func isFloat64AnInteger(f float64) bool { + + if math.IsNaN(f) || math.IsInf(f, 0) || f < min_json_float || f > max_json_float { + return false + } + + return f == float64(int64(f)) || f == float64(uint64(f)) +} + +func mustBeInteger(what interface{}) *int { + + if isJsonNumber(what) { + + number := what.(json.Number) + + _, _, isValidInt32 := checkJsonNumber(number) + + if isValidInt32 { + + int64Value, err := number.Int64() + if err != nil { + return nil + } + + int32Value := int(int64Value) + return &int32Value + + } else { + return nil + } + + } + + return nil +} + +func mustBeNumber(what interface{}) *float64 { + + if isJsonNumber(what) { + + number := what.(json.Number) + float64Value, err := number.Float64() + + if err == nil { + return &float64Value + } else { + return nil + } + + } + + return nil + +} + +// formats a number so that it is displayed as the smallest string possible +func resultErrorFormatJsonNumber(n json.Number) string { + + if int64Value, err := n.Int64(); err == nil { + return fmt.Sprintf("%d", int64Value) + } + + float64Value, _ := n.Float64() + + return fmt.Sprintf("%g", float64Value) +} + +// formats a number so that it is displayed as the smallest string possible +func resultErrorFormatNumber(n float64) string { + + if isFloat64AnInteger(n) { + return fmt.Sprintf("%d", int64(n)) + } + + return fmt.Sprintf("%g", n) +} + +func convertDocumentNode(val interface{}) interface{} { + + if lval, ok := val.([]interface{}); ok { + + res := []interface{}{} + for _, v := range lval { + res = append(res, convertDocumentNode(v)) + } + + return res + + } + + if mval, ok := val.(map[interface{}]interface{}); ok { + + res := map[string]interface{}{} + + for k, v := range mval { + res[k.(string)] = convertDocumentNode(v) + } + + return res + + } + + return val +} diff --git a/vendor/github.com/xeipuuv/gojsonschema/validation.go b/vendor/github.com/xeipuuv/gojsonschema/validation.go new file mode 100644 index 00000000..9afea251 --- /dev/null +++ b/vendor/github.com/xeipuuv/gojsonschema/validation.go @@ -0,0 +1,844 @@ +// Copyright 2015 xeipuuv ( https://github.com/xeipuuv ) +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// author xeipuuv +// author-github https://github.com/xeipuuv +// author-mail xeipuuv@gmail.com +// +// repository-name gojsonschema +// repository-desc An implementation of JSON Schema, based on IETF's draft v4 - Go language. +// +// description Extends Schema and subSchema, implements the validation phase. +// +// created 28-02-2013 + +package gojsonschema + +import ( + "encoding/json" + "reflect" + "regexp" + "strconv" + "strings" + "unicode/utf8" +) + +func Validate(ls JSONLoader, ld JSONLoader) (*Result, error) { + + var err error + + // load schema + + schema, err := NewSchema(ls) + if err != nil { + return nil, err + } + + // begine validation + + return schema.Validate(ld) + +} + +func (v *Schema) Validate(l JSONLoader) (*Result, error) { + + // load document + + root, err := l.LoadJSON() + if err != nil { + return nil, err + } + + // begin validation + + result := &Result{} + context := newJsonContext(STRING_CONTEXT_ROOT, nil) + v.rootSchema.validateRecursive(v.rootSchema, root, result, context) + + return result, nil + +} + +func (v *subSchema) subValidateWithContext(document interface{}, context *jsonContext) *Result { + result := &Result{} + v.validateRecursive(v, document, result, context) + return result +} + +// Walker function to validate the json recursively against the subSchema +func (v *subSchema) validateRecursive(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateRecursive %s", context.String()) + internalLog(" %v", currentNode) + } + + // Handle referenced schemas, returns directly when a $ref is found + if currentSubSchema.refSchema != nil { + v.validateRecursive(currentSubSchema.refSchema, currentNode, result, context) + return + } + + // Check for null value + if currentNode == nil { + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_NULL) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_NULL, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, currentNode, result, context) + v.validateCommon(currentSubSchema, currentNode, result, context) + + } else { // Not a null value + + if isJsonNumber(currentNode) { + + value := currentNode.(json.Number) + + _, isValidInt64, _ := checkJsonNumber(value) + + validType := currentSubSchema.types.Contains(TYPE_NUMBER) || (isValidInt64 && currentSubSchema.types.Contains(TYPE_INTEGER)) + + if currentSubSchema.types.IsTyped() && !validType { + + givenType := TYPE_INTEGER + if !isValidInt64 { + givenType = TYPE_NUMBER + } + + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": givenType, + }, + ) + return + } + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + 
v.validateString(currentSubSchema, value, result, context) + + } else { + + rValue := reflect.ValueOf(currentNode) + rKind := rValue.Kind() + + switch rKind { + + // Slice => JSON array + + case reflect.Slice: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_ARRAY) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_ARRAY, + }, + ) + return + } + + castCurrentNode := currentNode.([]interface{}) + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateArray(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + // Map => JSON object + + case reflect.Map: + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_OBJECT) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_OBJECT, + }, + ) + return + } + + castCurrentNode, ok := currentNode.(map[string]interface{}) + if !ok { + castCurrentNode = convertDocumentNode(currentNode).(map[string]interface{}) + } + + currentSubSchema.validateSchema(currentSubSchema, castCurrentNode, result, context) + + v.validateObject(currentSubSchema, castCurrentNode, result, context) + v.validateCommon(currentSubSchema, castCurrentNode, result, context) + + for _, pSchema := range currentSubSchema.propertiesChildren { + nextNode, ok := castCurrentNode[pSchema.property] + if ok { + subContext := newJsonContext(pSchema.property, context) + v.validateRecursive(pSchema, nextNode, result, subContext) + } + } + + // Simple JSON values : string, number, boolean + + case reflect.Bool: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_BOOLEAN) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_BOOLEAN, + }, + ) + return + } + + value := currentNode.(bool) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + case reflect.String: + + if currentSubSchema.types.IsTyped() && !currentSubSchema.types.Contains(TYPE_STRING) { + result.addError( + new(InvalidTypeError), + context, + currentNode, + ErrorDetails{ + "expected": currentSubSchema.types.String(), + "given": TYPE_STRING, + }, + ) + return + } + + value := currentNode.(string) + + currentSubSchema.validateSchema(currentSubSchema, value, result, context) + v.validateNumber(currentSubSchema, value, result, context) + v.validateCommon(currentSubSchema, value, result, context) + v.validateString(currentSubSchema, value, result, context) + + } + + } + + } + + result.incrementScore() +} + +// Different kinds of validation there, subSchema / common / array / object / string... 
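+// Summary of the keyword handling implemented below: anyOf passes when at least
+// one subschema validates, oneOf requires exactly one to validate, allOf requires
+// every subschema to validate, and not succeeds only when its subschema fails.
+// When no anyOf/oneOf candidate matches, the errors of the highest-scoring
+// (closest-matching) attempt are merged into the result so the report points at
+// the schema the document most nearly satisfied; "dependencies" entries are then
+// checked for each property present, either as a list of required keys or as a
+// dependent subschema.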
+func (v *subSchema) validateSchema(currentSubSchema *subSchema, currentNode interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateSchema %s", context.String()) + internalLog(" %v", currentNode) + } + + if len(currentSubSchema.anyOf) > 0 { + + validatedAnyOf := false + var bestValidationResult *Result + + for _, anyOfSchema := range currentSubSchema.anyOf { + if !validatedAnyOf { + validationResult := anyOfSchema.subValidateWithContext(currentNode, context) + validatedAnyOf = validationResult.Valid() + + if !validatedAnyOf && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + } + if !validatedAnyOf { + + result.addError(new(NumberAnyOfError), context, currentNode, ErrorDetails{}) + + if bestValidationResult != nil { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + } + + if len(currentSubSchema.oneOf) > 0 { + + nbValidated := 0 + var bestValidationResult *Result + + for _, oneOfSchema := range currentSubSchema.oneOf { + validationResult := oneOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } else if nbValidated == 0 && (bestValidationResult == nil || validationResult.score > bestValidationResult.score) { + bestValidationResult = validationResult + } + } + + if nbValidated != 1 { + + result.addError(new(NumberOneOfError), context, currentNode, ErrorDetails{}) + + if nbValidated == 0 { + // add error messages of closest matching subSchema as + // that's probably the one the user was trying to match + result.mergeErrors(bestValidationResult) + } + } + + } + + if len(currentSubSchema.allOf) > 0 { + nbValidated := 0 + + for _, allOfSchema := range currentSubSchema.allOf { + validationResult := allOfSchema.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + nbValidated++ + } + result.mergeErrors(validationResult) + } + + if nbValidated != len(currentSubSchema.allOf) { + result.addError(new(NumberAllOfError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.not != nil { + validationResult := currentSubSchema.not.subValidateWithContext(currentNode, context) + if validationResult.Valid() { + result.addError(new(NumberNotError), context, currentNode, ErrorDetails{}) + } + } + + if currentSubSchema.dependencies != nil && len(currentSubSchema.dependencies) > 0 { + if isKind(currentNode, reflect.Map) { + for elementKey := range currentNode.(map[string]interface{}) { + if dependency, ok := currentSubSchema.dependencies[elementKey]; ok { + switch dependency := dependency.(type) { + + case []string: + for _, dependOnKey := range dependency { + if _, dependencyResolved := currentNode.(map[string]interface{})[dependOnKey]; !dependencyResolved { + result.addError( + new(MissingDependencyError), + context, + currentNode, + ErrorDetails{"dependency": dependOnKey}, + ) + } + } + + case *subSchema: + dependency.validateRecursive(dependency, currentNode, result, context) + + } + } + } + } + } + + result.incrementScore() +} + +func (v *subSchema) validateCommon(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateCommon %s", context.String()) + internalLog(" %v", value) + } + + // enum: + if len(currentSubSchema.enum) > 0 { + has, err := currentSubSchema.ContainsEnum(value) + 
if err != nil { + result.addError(new(InternalError), context, value, ErrorDetails{"error": err}) + } + if !has { + result.addError( + new(EnumError), + context, + value, + ErrorDetails{ + "allowed": strings.Join(currentSubSchema.enum, ", "), + }, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateArray(currentSubSchema *subSchema, value []interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateArray %s", context.String()) + internalLog(" %v", value) + } + + nbValues := len(value) + + // TODO explain + if currentSubSchema.itemsChildrenIsSingleSchema { + for i := range value { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[0].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } else { + if currentSubSchema.itemsChildren != nil && len(currentSubSchema.itemsChildren) > 0 { + + nbItems := len(currentSubSchema.itemsChildren) + + // while we have both schemas and values, check them against each other + for i := 0; i != nbItems && i != nbValues; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := currentSubSchema.itemsChildren[i].subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + + if nbItems < nbValues { + // we have less schemas than elements in the instance array, + // but that might be ok if "additionalItems" is specified. + + switch currentSubSchema.additionalItems.(type) { + case bool: + if !currentSubSchema.additionalItems.(bool) { + result.addError(new(ArrayNoAdditionalItemsError), context, value, ErrorDetails{}) + } + case *subSchema: + additionalItemSchema := currentSubSchema.additionalItems.(*subSchema) + for i := nbItems; i != nbValues; i++ { + subContext := newJsonContext(strconv.Itoa(i), context) + validationResult := additionalItemSchema.subValidateWithContext(value[i], subContext) + result.mergeErrors(validationResult) + } + } + } + } + } + + // minItems & maxItems + if currentSubSchema.minItems != nil { + if nbValues < int(*currentSubSchema.minItems) { + result.addError( + new(ArrayMinItemsError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minItems}, + ) + } + } + if currentSubSchema.maxItems != nil { + if nbValues > int(*currentSubSchema.maxItems) { + result.addError( + new(ArrayMaxItemsError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxItems}, + ) + } + } + + // uniqueItems: + if currentSubSchema.uniqueItems { + var stringifiedItems []string + for _, v := range value { + vString, err := marshalToJsonString(v) + if err != nil { + result.addError(new(InternalError), context, value, ErrorDetails{"err": err}) + } + if isStringInSlice(stringifiedItems, *vString) { + result.addError( + new(ItemsMustBeUniqueError), + context, + value, + ErrorDetails{"type": TYPE_ARRAY}, + ) + } + stringifiedItems = append(stringifiedItems, *vString) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateObject(currentSubSchema *subSchema, value map[string]interface{}, result *Result, context *jsonContext) { + + if internalLogEnabled { + internalLog("validateObject %s", context.String()) + internalLog(" %v", value) + } + + // minProperties & maxProperties: + if currentSubSchema.minProperties != nil { + if len(value) < int(*currentSubSchema.minProperties) { + result.addError( + new(ArrayMinPropertiesError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minProperties}, + ) + } + } + if 
currentSubSchema.maxProperties != nil { + if len(value) > int(*currentSubSchema.maxProperties) { + result.addError( + new(ArrayMaxPropertiesError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxProperties}, + ) + } + } + + // required: + for _, requiredProperty := range currentSubSchema.required { + _, ok := value[requiredProperty] + if ok { + result.incrementScore() + } else { + result.addError( + new(RequiredError), + context, + value, + ErrorDetails{"property": requiredProperty}, + ) + } + } + + // additionalProperty & patternProperty: + if currentSubSchema.additionalProperties != nil { + + switch currentSubSchema.additionalProperties.(type) { + case bool: + + if !currentSubSchema.additionalProperties.(bool) { + + for pk := range value { + + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if found { + + if pp_has && !pp_match { + result.addError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + } + + } else { + + if !pp_has || !pp_match { + result.addError( + new(AdditionalPropertyNotAllowedError), + context, + value[pk], + ErrorDetails{"property": pk}, + ) + } + + } + } + } + + case *subSchema: + + additionalPropertiesSchema := currentSubSchema.additionalProperties.(*subSchema) + for pk := range value { + + found := false + for _, spValue := range currentSubSchema.propertiesChildren { + if pk == spValue.property { + found = true + } + } + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if found { + + if pp_has && !pp_match { + validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) + result.mergeErrors(validationResult) + } + + } else { + + if !pp_has || !pp_match { + validationResult := additionalPropertiesSchema.subValidateWithContext(value[pk], context) + result.mergeErrors(validationResult) + } + + } + + } + } + } else { + + for pk := range value { + + pp_has, pp_match := v.validatePatternProperty(currentSubSchema, pk, value[pk], result, context) + + if pp_has && !pp_match { + + result.addError( + new(InvalidPropertyPatternError), + context, + value[pk], + ErrorDetails{ + "property": pk, + "pattern": currentSubSchema.PatternPropertiesString(), + }, + ) + } + + } + } + + result.incrementScore() +} + +func (v *subSchema) validatePatternProperty(currentSubSchema *subSchema, key string, value interface{}, result *Result, context *jsonContext) (has bool, matched bool) { + + if internalLogEnabled { + internalLog("validatePatternProperty %s", context.String()) + internalLog(" %s %v", key, value) + } + + has = false + + validatedkey := false + + for pk, pv := range currentSubSchema.patternProperties { + if matches, _ := regexp.MatchString(pk, key); matches { + has = true + subContext := newJsonContext(key, context) + validationResult := pv.subValidateWithContext(value, subContext) + result.mergeErrors(validationResult) + if validationResult.Valid() { + validatedkey = true + } + } + } + + if !validatedkey { + return has, false + } + + result.incrementScore() + + return has, true +} + +func (v *subSchema) validateString(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + // Ignore JSON numbers + if isJsonNumber(value) { + return + } + + // Ignore non strings + if !isKind(value, reflect.String) { + return + } + 
+ if internalLogEnabled { + internalLog("validateString %s", context.String()) + internalLog(" %v", value) + } + + stringValue := value.(string) + + // minLength & maxLength: + if currentSubSchema.minLength != nil { + if utf8.RuneCount([]byte(stringValue)) < int(*currentSubSchema.minLength) { + result.addError( + new(StringLengthGTEError), + context, + value, + ErrorDetails{"min": *currentSubSchema.minLength}, + ) + } + } + if currentSubSchema.maxLength != nil { + if utf8.RuneCount([]byte(stringValue)) > int(*currentSubSchema.maxLength) { + result.addError( + new(StringLengthLTEError), + context, + value, + ErrorDetails{"max": *currentSubSchema.maxLength}, + ) + } + } + + // pattern: + if currentSubSchema.pattern != nil { + if !currentSubSchema.pattern.MatchString(stringValue) { + result.addError( + new(DoesNotMatchPatternError), + context, + value, + ErrorDetails{"pattern": currentSubSchema.pattern}, + ) + + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, stringValue) { + result.addError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} + +func (v *subSchema) validateNumber(currentSubSchema *subSchema, value interface{}, result *Result, context *jsonContext) { + + // Ignore non numbers + if !isJsonNumber(value) { + return + } + + if internalLogEnabled { + internalLog("validateNumber %s", context.String()) + internalLog(" %v", value) + } + + number := value.(json.Number) + float64Value, _ := number.Float64() + + // multipleOf: + if currentSubSchema.multipleOf != nil { + + if !isFloat64AnInteger(float64Value / *currentSubSchema.multipleOf) { + result.addError( + new(MultipleOfError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{"multiple": *currentSubSchema.multipleOf}, + ) + } + } + + //maximum & exclusiveMaximum: + if currentSubSchema.maximum != nil { + if currentSubSchema.exclusiveMaximum { + if float64Value >= *currentSubSchema.maximum { + result.addError( + new(NumberLTError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "max": resultErrorFormatNumber(*currentSubSchema.maximum), + }, + ) + } + } else { + if float64Value > *currentSubSchema.maximum { + result.addError( + new(NumberLTEError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "max": resultErrorFormatNumber(*currentSubSchema.maximum), + }, + ) + } + } + } + + //minimum & exclusiveMinimum: + if currentSubSchema.minimum != nil { + if currentSubSchema.exclusiveMinimum { + if float64Value <= *currentSubSchema.minimum { + result.addError( + new(NumberGTError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "min": resultErrorFormatNumber(*currentSubSchema.minimum), + }, + ) + } + } else { + if float64Value < *currentSubSchema.minimum { + result.addError( + new(NumberGTEError), + context, + resultErrorFormatJsonNumber(number), + ErrorDetails{ + "min": resultErrorFormatNumber(*currentSubSchema.minimum), + }, + ) + } + } + } + + // format + if currentSubSchema.format != "" { + if !FormatCheckers.IsFormat(currentSubSchema.format, float64Value) { + result.addError( + new(DoesNotMatchFormatError), + context, + value, + ErrorDetails{"format": currentSubSchema.format}, + ) + } + } + + result.incrementScore() +} From 27c2eda635a8cdb9c2248ef6e17c067b0bc253ba Mon Sep 17 00:00:00 2001 From: Vincent Batts Date: Wed, 10 Jan 2018 16:23:35 -0500 Subject: [PATCH 36/59] Makefile: installing a config, 
requires a config Signed-off-by: Vincent Batts --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 62770df7..d8c61987 100644 --- a/Makefile +++ b/Makefile @@ -145,7 +145,7 @@ install.man: install ${SELINUXOPT} -m 644 $(filter %.5,$(MANPAGES)) -t $(MANDIR)/man5 install ${SELINUXOPT} -m 644 $(filter %.8,$(MANPAGES)) -t $(MANDIR)/man8 -install.config: +install.config: crio.conf install ${SELINUXOPT} -D -m 644 crio.conf $(ETCDIR_CRIO)/crio.conf install ${SELINUXOPT} -D -m 644 seccomp.json $(ETCDIR_CRIO)/seccomp.json install ${SELINUXOPT} -D -m 644 crio-umount.conf $(OCIUMOUNTINSTALLDIR)/crio-umount.conf From 8d2a572eadd87ca5fb69af583ac8285bfe14fafc Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Thu, 11 Jan 2018 16:54:38 +0100 Subject: [PATCH 37/59] contrib: test: add node-e2e job Signed-off-by: Antonio Murdaca --- contrib/test/integration/main.yml | 22 ++++++++++++++++++++++ contrib/test/integration/node-e2e.yml | 26 ++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 contrib/test/integration/node-e2e.yml diff --git a/contrib/test/integration/main.yml b/contrib/test/integration/main.yml index cbbcd12e..1f6448f5 100644 --- a/contrib/test/integration/main.yml +++ b/contrib/test/integration/main.yml @@ -41,6 +41,7 @@ tags: - integration - e2e + - node-e2e - critest tasks: - name: clone build and install cri-o @@ -80,6 +81,27 @@ - name: run critest validation and benchmarks include: critest.yml +- hosts: all + remote_user: root + vars_files: + - "{{ playbook_dir }}/vars.yml" + tags: + - node-e2e + tasks: + - name: install Golang tools + include: golang.yml + vars: + version: "1.9.2" + - name: clone build and install kubernetes + include: "build/kubernetes.yml" + vars: + force_clone: True + k8s_git_version: "master" + k8s_github_fork: "kubernetes" + crio_socket: "/var/run/crio/crio.sock" + - name: run k8s node-e2e tests + include: node-e2e.yml + - hosts: all remote_user: root vars_files: diff --git a/contrib/test/integration/node-e2e.yml b/contrib/test/integration/node-e2e.yml new file mode 100644 index 00000000..6ea8ac2c --- /dev/null +++ b/contrib/test/integration/node-e2e.yml @@ -0,0 +1,26 @@ +--- + +- name: enable and start CRI-O + systemd: + name: crio + state: started + enabled: yes + daemon_reload: yes + +- name: disable SELinux + command: setenforce 0 + +- name: Flush the iptables + command: iptables -F + +- name: run node-e2e tests + shell: | + # parametrize crio socket + # cgroup-driver??? 
+ # TODO(runcom): remove conformance focus, we want everything for testgrid + make test-e2e-node PARALLELISM=1 RUNTIME=remote CONTAINER_RUNTIME_ENDPOINT=/var/run/crio.sock IMAGE_SERVICE_ENDPOINT=/var/run/crio/crio.sock TEST_ARGS='--prepull-images=true --kubelet-flags="--cgroup-driver=systemd"' FOCUS="\[Conformance\]" &> {{ artifacts }}/node-e2e.log + args: + chdir: "{{ ansible_env.GOPATH }}/src/k8s.io/kubernetes" + async: 7200 + poll: 10 + ignore_errors: true From 8c190a683cdc072da6c752a4fde2419e90feeee1 Mon Sep 17 00:00:00 2001 From: Antonio Murdaca Date: Fri, 12 Jan 2018 15:45:44 +0100 Subject: [PATCH 38/59] server: use grpc getters to avoid panics Signed-off-by: Antonio Murdaca --- server/container_create.go | 6 +++++- server/sandbox_run.go | 14 +++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/server/container_create.go b/server/container_create.go index 56a94c2b..7db6a51d 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -562,7 +562,11 @@ func (s *Server) CreateContainer(ctx context.Context, req *pb.CreateContainerReq return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig is nil") } - name := containerConfig.GetMetadata().Name + if containerConfig.GetMetadata() == nil { + return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Metadata is nil") + } + + name := containerConfig.GetMetadata().GetName() if name == "" { return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Name is empty") } diff --git a/server/sandbox_run.go b/server/sandbox_run.go index 9e9ff487..8e6c1732 100644 --- a/server/sandbox_run.go +++ b/server/sandbox_run.go @@ -101,16 +101,20 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest s.updateLock.RLock() defer s.updateLock.RUnlock() + if req.GetConfig().GetMetadata() == nil { + return nil, fmt.Errorf("CreateContainerRequest.ContainerConfig.Metadata is nil") + } + logrus.Debugf("RunPodSandboxRequest %+v", req) var processLabel, mountLabel, resolvPath string // process req.Name - kubeName := req.GetConfig().GetMetadata().Name + kubeName := req.GetConfig().GetMetadata().GetName() if kubeName == "" { return nil, fmt.Errorf("PodSandboxConfig.Name should not be empty") } - namespace := req.GetConfig().GetMetadata().Namespace - attempt := req.GetConfig().GetMetadata().Attempt + namespace := req.GetConfig().GetMetadata().GetNamespace() + attempt := req.GetConfig().GetMetadata().GetAttempt() id, name, err := s.generatePodIDandName(req.GetConfig()) if err != nil { @@ -156,8 +160,8 @@ func (s *Server) RunPodSandbox(ctx context.Context, req *pb.RunPodSandboxRequest name, id, s.config.PauseImage, "", containerName, - req.GetConfig().GetMetadata().Name, - req.GetConfig().GetMetadata().Uid, + req.GetConfig().GetMetadata().GetName(), + req.GetConfig().GetMetadata().GetUid(), namespace, attempt, nil) From e124834b0dd54bf37b51847d28ff071289b21838 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Thu, 18 Jan 2018 09:47:56 -0800 Subject: [PATCH 39/59] test/README: Clear Containers moved to clearcontainers/runtime And changed the name of their binary. This commit catches the docs up with intel/cc-oci-runtime#1065 (merged 2017-09-25). Signed-off-by: W. Trevor King --- test/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/README.md b/test/README.md index eb314e8d..bd39756f 100644 --- a/test/README.md +++ b/test/README.md @@ -69,11 +69,11 @@ Tests on the host will run with `runc` as the default runtime. 
However you can select other OCI compatible runtimes by setting the `RUNTIME` environment variable. -For example one could use the [Clear Containers](https://github.com/01org/cc-oci-runtime/wiki/Installation) +For example one could use the [Clear Containers](https://github.com/clearcontainers/runtime) runtime instead of `runc`: ``` -make localintegration RUNTIME=cc-oci-runtime +make localintegration RUNTIME=cc-runtime ``` ## Writing integration tests From 8c7c70c2dba50cbd8d71e2724ba67ea034c233f4 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Thu, 18 Jan 2018 10:17:00 -0800 Subject: [PATCH 40/59] .mailmap: Add entries for inconsistent users Where the same user had multiple entries, I mostly went with whichever entry had the most-recent non-merge commits. The order is alphabetical according to Emacs' sort-lines. Signed-off-by: W. Trevor King --- .mailmap | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .mailmap diff --git a/.mailmap b/.mailmap new file mode 100644 index 00000000..29d8c860 --- /dev/null +++ b/.mailmap @@ -0,0 +1,10 @@ +Aleksa Sarai +Antonio Murdaca +CuiHaozhi +Daniel J Walsh +Haiyan Meng +Lorenzo Fontana +Mrunal Patel +Mrunal Patel +Pengfei Ni +Tobias Klauser From 2bf750c87191be825e0a0ddf6a6e3a55e3085181 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Thu, 18 Jan 2018 13:03:10 -0800 Subject: [PATCH 41/59] tutorial: Drop install.config output to stay DRY install.config has also installed rio-umount.conf since 51b225474 (Tell oci-umount where to remove mountpoints inside container, #937, 2017-09-21). And Make output is usually not particularly interesting. Signed-off-by: W. Trevor King --- tutorial.md | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/tutorial.md b/tutorial.md index 656a706f..9863af58 100644 --- a/tutorial.md +++ b/tutorial.md @@ -151,19 +151,12 @@ install -D -m 644 crio.conf /etc/crio/crio.conf install -D -m 644 seccomp.json /etc/crio/seccomp.json ``` -If you are installing for the first time, generate config as follows: +If you are installing for the first time, generate and install configuration files with: ``` sudo make install.config ``` -Output: - -``` -install -D -m 644 crio.conf /etc/crio/crio.conf -install -D -m 644 seccomp.json /etc/crio/seccomp.json -``` - #### Start the crio system daemon ``` From bf8a99c0856b5b837e3248a534d68367c426a089 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Thu, 18 Jan 2018 13:07:54 -0800 Subject: [PATCH 42/59] tutorial: Drop 'make install' output to stay DRY 'make install' hasn't installed crio.conf since 8b632729 (Install to /usr/local to avoid conflicts with vendor binaries, 2017-01-04, #304). And Make output is usually not particularly interesting. Signed-off-by: W. 
Trevor King --- tutorial.md | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/tutorial.md b/tutorial.md index 9863af58..693c9c96 100644 --- a/tutorial.md +++ b/tutorial.md @@ -138,19 +138,6 @@ make sudo make install ``` -Output: - -``` -install -D -m 755 crio /usr/local/bin/crio -install -D -m 755 conmon/conmon /usr/local/libexec/crio/conmon -install -D -m 755 pause/pause /usr/local/libexec/crio/pause -install -d -m 755 /usr/local/share/man/man{1,5,8} -install -m 644 docs/crio.conf.5 -t /usr/local/share/man/man5 -install -m 644 docs/crio.8 -t /usr/local/share/man/man8 -install -D -m 644 crio.conf /etc/crio/crio.conf -install -D -m 644 seccomp.json /etc/crio/seccomp.json -``` - If you are installing for the first time, generate and install configuration files with: ``` From 15d839ea0da4c9f587aab1beb88045d40d1ea200 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Thu, 18 Jan 2018 12:56:45 -0800 Subject: [PATCH 43/59] kubernetes: Simplify and freshen the required-files table The cri-o entries are stale vs. the content currently installed by the Makefile. This commit drops them and just references the make call before starting the table, which lets us stay DRY. runc is not built from the cri-o repository. The docs have claimed it was since 983aec63 (doc: Add instruction to run cri-o with kubernetes, 2017-01-31, #353), but it's independent like the CNI plugins. The CNI plugins were moved to containernetworking/plugins in containernetworking/cni@bc0d09e (plugins: moved to containernetworking/plugins, 2017-05-17, containernetworking/cni#457). I've added a link to the in-repo policy.json example. We probably also want to link to the docs (for the version we vendor?) [1], but I've left that alone for now. The CNI config examples were removed from the project README in 9088a12c (contrib: cni: provide example CNI configurations, 2016-12-24, #295). I've adjusted the reference to point to the new location, although again, I'd rather replace this with links to upstream docs. [1]: https://github.com/containers/image/blob/3d0304a02154dddc8f97cc833aa0861cea5e9ade/docs/policy.json.md Signed-off-by: W. Trevor King --- kubernetes.md | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/kubernetes.md b/kubernetes.md index 96ef5d89..e95694ac 100644 --- a/kubernetes.md +++ b/kubernetes.md @@ -13,17 +13,15 @@ Below, you can find an instruction how to switch one or more nodes on running ku ### Preparing crio -You must prepare and install `crio` on each node you would like to switch. Here's the list of files that must be provided: +You must prepare and install `crio` on each node you would like to switch. 
+Besides the files installed by `make install install.config`, here's the list of files that must be provided: -| File path | Description | Location | -|--------------------------------------------|----------------------------|-----------------------------------------------------| -| `/etc/crio/crio.conf` | crio configuration | Generated on cri-o `make install` | -| `/etc/crio/seccomp.conf` | seccomp config | Example stored in cri-o repository | -| `/etc/containers/policy.json` | containers policy | Example stored in cri-o repository | -| `/bin/{crio, runc}` | `crio` and `runc` binaries | Built from cri-o repository | -| `/usr/local/libexec/crio/conmon` | `conmon` binary | Built from cri-o repository | -| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries | Can be built from sources `containernetworking/cni` | -| `/etc/cni/net.d/10-mynet.conf` | Network config | Example stored in [README file](README.md) | +| File path | Description | Location | +|--------------------------------------------|-----------------------------|---------------------------------------------------------| +| `/etc/containers/policy.json` | containers policy | [Example](test/policy.json) stored in cri-o repository | +| `/bin/runc` | `runc` or other OCI runtime | Can be build from sources `opencontainers/runc` | +| `/opt/cni/bin/{flannel, bridge,...}` | CNI plugins binaries | Can be built from sources `containernetworking/plugins` | +| `/etc/cni/net.d/...` | CNI network config | Example [here](contrib/cni) | `crio` binary can be executed directly on host, inside the container or in any way. However, recommended way is to set it as a systemd service. From 282b900433d497260c6b8ff1243905b376d0428d Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Thu, 18 Jan 2018 14:06:27 -0800 Subject: [PATCH 44/59] test/README: Update the CNI plugins instructions for /cni -> /plugins Catching up with the Dockerfile change from f51b0a10 (Dockerfile: move to containernetworking/plugins, 2017-05-25, #536). The new plugins commit from f51b0a10 is still the current Dockerfile entry. This commit also replaces the previous 'go get' call with a git clone to match the Dockerfile's approach. I've added an additional 'cd' call so I don't have to repeat $GOPATH/... more than once, but other than that, the example matches the current Dockerfile entry. I've also removed some line-continuation slashes we've been dragging around since the section landed 07ccda33 (tests: Install CNI configuration files by default, 2017-04-06, #434). I'm guessing they were a copy/paste bug from the Dockerfile, but this example has new prompts for each command (so it doesn't need continuation) while the Dockerfile is using && chaining (so it does). Signed-off-by: W. 
Trevor King --- test/README.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/test/README.md b/test/README.md index eb314e8d..e759b691 100644 --- a/test/README.md +++ b/test/README.md @@ -41,11 +41,12 @@ You will also need to install the [CNI](https://github.com/containernetworking/c the the default pod test template runs without host networking: ``` -$ go get github.com/containernetworking/cni -$ cd "$GOPATH/src/github.com/containernetworking/cni" -$ git checkout -q d4bbce1865270cd2d2be558d6a23e63d314fe769 -$ ./build.sh \ -$ mkdir -p /opt/cni/bin \ +$ cd "$GOPATH/src/github.com/containernetworking" +$ git clone https://github.com/containernetworking/plugins.git +$ cd plugins +$ git checkout -q dcf7368eeab15e2affc6256f0bb1e84dd46a34de +$ ./build.sh +$ mkdir -p /opt/cni/bin $ cp bin/* /opt/cni/bin/ ``` From 78511156935915b310ae96f51c2731a5ef59f4c7 Mon Sep 17 00:00:00 2001 From: Mrunal Patel Date: Fri, 19 Jan 2018 10:37:51 -0800 Subject: [PATCH 45/59] Add 1.9 release to compatibility table Signed-off-by: Mrunal Patel --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 14cc3014..69feaa34 100644 --- a/README.md +++ b/README.md @@ -12,6 +12,7 @@ |----------------------------|-------------------------------|--------------------| | CRI-O 1.0.x - release-1.0 | Kubernetes 1.7 branch, v1.7.x | = | | CRI-O 1.8.x - release-1.8 | Kubernetes 1.8 branch, v1.8.x | = | +| CRI-O 1.9.x - release-1.9 | Kubernetes 1.9 branch, v1.9.x | = | | CRI-O HEAD - master | Kubernetes master branch | ✓ | Key: From 826298483a949931fe33d9d353fb9b72c44c19f1 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Fri, 19 Jan 2018 11:39:27 -0800 Subject: [PATCH 46/59] hack/libdm_installed: Add a test for libdevmapper.h Avoid crashing 'make' with: No package 'devmapper' found by disabling the devmapper driver when the library it requires is not installed. Also give the libdm_no_deferred_remove script a more specific name to avoid confusion. Signed-off-by: W. Trevor King --- Makefile | 2 +- hack/libdm_installed.sh | 7 +++++++ hack/{libdm_tag.sh => libdm_no_deferred_remove_tag.sh} | 0 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100755 hack/libdm_installed.sh rename hack/{libdm_tag.sh => libdm_no_deferred_remove_tag.sh} (100%) diff --git a/Makefile b/Makefile index d8c61987..86898afc 100644 --- a/Makefile +++ b/Makefile @@ -11,7 +11,7 @@ LIBEXECDIR ?= ${PREFIX}/libexec MANDIR ?= ${PREFIX}/share/man ETCDIR ?= ${DESTDIR}/etc ETCDIR_CRIO ?= ${ETCDIR}/crio -BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh) +BUILDTAGS ?= seccomp $(shell hack/btrfs_tag.sh) $(shell hack/libdm_installed.sh) $(shell hack/libdm_no_deferred_remove_tag.sh) $(shell hack/btrfs_installed_tag.sh) $(shell hack/ostree_tag.sh) $(shell hack/selinux_tag.sh) CRICTL_CONFIG_DIR=${DESTDIR}/etc BASHINSTALLDIR=${PREFIX}/share/bash-completion/completions diff --git a/hack/libdm_installed.sh b/hack/libdm_installed.sh new file mode 100755 index 00000000..f48c7e27 --- /dev/null +++ b/hack/libdm_installed.sh @@ -0,0 +1,7 @@ +#!/bin/bash +cc -E - > /dev/null 2> /dev/null << EOF +#include +EOF +if test $? 
-ne 0 ; then + echo exclude_graphdriver_devicemapper +fi diff --git a/hack/libdm_tag.sh b/hack/libdm_no_deferred_remove_tag.sh similarity index 100% rename from hack/libdm_tag.sh rename to hack/libdm_no_deferred_remove_tag.sh From 523326b7ba15e6e38fd4d1716d78c860ac1ad4fa Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Fri, 19 Jan 2018 09:49:28 -0800 Subject: [PATCH 47/59] server/container_create: Factor out setupCapabilities helper Having a separate function holding the details of this makes reading createSandboxContainer easier. While I was moving the code, I've also cleaned up two things: * The nil capabilities check is now earlier, where before it had been between the ALL handling and the non-ALL handling. * I've added a capPrefixed variable to avoid having multiple toCAPPrefixed calls per capability. Signed-off-by: W. Trevor King --- server/container_create.go | 202 ++++++++++++++++++++----------------- 1 file changed, 107 insertions(+), 95 deletions(-) diff --git a/server/container_create.go b/server/container_create.go index 6afc412b..506bcf79 100644 --- a/server/container_create.go +++ b/server/container_create.go @@ -490,6 +490,110 @@ func setupContainerUser(specgen *generate.Generator, rootfs string, sc *pb.Linux return nil } +// setupCapabilities sets process.capabilities in the OCI runtime config. +func setupCapabilities(specgen *generate.Generator, capabilities *pb.Capability) error { + if capabilities == nil { + return nil + } + + toCAPPrefixed := func(cap string) string { + if !strings.HasPrefix(strings.ToLower(cap), "cap_") { + return "CAP_" + strings.ToUpper(cap) + } + return cap + } + + // Add/drop all capabilities if "all" is specified, so that + // following individual add/drop could still work. E.g. + // AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"} + // will be all capabilities without `CAP_CHOWN`. 
+ // see https://github.com/kubernetes/kubernetes/issues/51980 + if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { + for _, c := range getOCICapabilitiesList() { + if err := specgen.AddProcessCapabilityAmbient(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityBounding(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityEffective(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityInheritable(c); err != nil { + return err + } + if err := specgen.AddProcessCapabilityPermitted(c); err != nil { + return err + } + } + } + if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { + for _, c := range getOCICapabilitiesList() { + if err := specgen.DropProcessCapabilityAmbient(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityBounding(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityEffective(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityInheritable(c); err != nil { + return err + } + if err := specgen.DropProcessCapabilityPermitted(c); err != nil { + return err + } + } + } + + for _, cap := range capabilities.GetAddCapabilities() { + if strings.ToUpper(cap) == "ALL" { + continue + } + capPrefixed := toCAPPrefixed(cap) + if err := specgen.AddProcessCapabilityAmbient(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityBounding(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityEffective(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityInheritable(capPrefixed); err != nil { + return err + } + if err := specgen.AddProcessCapabilityPermitted(capPrefixed); err != nil { + return err + } + } + + for _, cap := range capabilities.GetDropCapabilities() { + if strings.ToUpper(cap) == "ALL" { + continue + } + capPrefixed := toCAPPrefixed(cap) + if err := specgen.DropProcessCapabilityAmbient(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityBounding(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityEffective(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityInheritable(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + if err := specgen.DropProcessCapabilityPermitted(capPrefixed); err != nil { + return fmt.Errorf("failed to drop cap %s %v", capPrefixed, err) + } + } + + return nil +} + func hostNetwork(containerConfig *pb.ContainerConfig) bool { securityContext := containerConfig.GetLinux().GetSecurityContext() if securityContext == nil || securityContext.GetNamespaceOptions() == nil { @@ -819,105 +923,13 @@ func (s *Server) createSandboxContainer(ctx context.Context, containerID string, } specgen.SetLinuxCgroupsPath(cgPath) - capabilities := linux.GetSecurityContext().GetCapabilities() if privileged { - // this is setting correct capabilities as well for privileged mode specgen.SetupPrivileged(true) setOCIBindMountsPrivileged(&specgen) } else { - toCAPPrefixed := func(cap string) string { - if !strings.HasPrefix(strings.ToLower(cap), "cap_") { - return "CAP_" + strings.ToUpper(cap) - } - return cap - } - - // Add/drop all capabilities if "all" is specified, so that - // following individual add/drop could still work. E.g. 
- // AddCapabilities: []string{"ALL"}, DropCapabilities: []string{"CHOWN"} - // will be all capabilities without `CAP_CHOWN`. - // see https://github.com/kubernetes/kubernetes/issues/51980 - if inStringSlice(capabilities.GetAddCapabilities(), "ALL") { - for _, c := range getOCICapabilitiesList() { - if err := specgen.AddProcessCapabilityAmbient(c); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityBounding(c); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityEffective(c); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityInheritable(c); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityPermitted(c); err != nil { - return nil, err - } - } - } - if inStringSlice(capabilities.GetDropCapabilities(), "ALL") { - for _, c := range getOCICapabilitiesList() { - if err := specgen.DropProcessCapabilityAmbient(c); err != nil { - return nil, err - } - if err := specgen.DropProcessCapabilityBounding(c); err != nil { - return nil, err - } - if err := specgen.DropProcessCapabilityEffective(c); err != nil { - return nil, err - } - if err := specgen.DropProcessCapabilityInheritable(c); err != nil { - return nil, err - } - if err := specgen.DropProcessCapabilityPermitted(c); err != nil { - return nil, err - } - } - } - - if capabilities != nil { - for _, cap := range capabilities.GetAddCapabilities() { - if strings.ToUpper(cap) == "ALL" { - continue - } - if err := specgen.AddProcessCapabilityAmbient(toCAPPrefixed(cap)); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityBounding(toCAPPrefixed(cap)); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityEffective(toCAPPrefixed(cap)); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityInheritable(toCAPPrefixed(cap)); err != nil { - return nil, err - } - if err := specgen.AddProcessCapabilityPermitted(toCAPPrefixed(cap)); err != nil { - return nil, err - } - } - - for _, cap := range capabilities.GetDropCapabilities() { - if strings.ToUpper(cap) == "ALL" { - continue - } - if err := specgen.DropProcessCapabilityAmbient(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - if err := specgen.DropProcessCapabilityBounding(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - if err := specgen.DropProcessCapabilityEffective(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - if err := specgen.DropProcessCapabilityInheritable(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - if err := specgen.DropProcessCapabilityPermitted(toCAPPrefixed(cap)); err != nil { - return nil, fmt.Errorf("failed to drop cap %s %v", toCAPPrefixed(cap), err) - } - } + err = setupCapabilities(&specgen, linux.GetSecurityContext().GetCapabilities()) + if err != nil { + return nil, err } } specgen.SetProcessSelinuxLabel(processLabel) From b066a839ec526e94e2cdefe262fb328897c83b17 Mon Sep 17 00:00:00 2001 From: Mrunal Patel Date: Fri, 19 Jan 2018 14:27:57 -0800 Subject: [PATCH 48/59] Bump up version to 1.9.1 Signed-off-by: Mrunal Patel --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 5bffa4d7..23fe8797 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package 
version // Version is the version of the build. -const Version = "1.9.1-dev" +const Version = "1.9.1" From 2907f2b6c6d62e1b8ce3e7a5813bf82716e9ebdd Mon Sep 17 00:00:00 2001 From: Mrunal Patel Date: Fri, 19 Jan 2018 14:28:25 -0800 Subject: [PATCH 49/59] Bump up version to 1.9.2-dev Signed-off-by: Mrunal Patel --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 23fe8797..1a66ef29 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version is the version of the build. -const Version = "1.9.1" +const Version = "1.9.2-dev" From 59b86617bc6963e58fff9501efd6f71768e5a507 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 4 Jan 2018 20:14:29 +0100 Subject: [PATCH 50/59] syscontainer, rhel: read env variables from files Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/rhel/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/system_containers/rhel/run.sh b/contrib/system_containers/rhel/run.sh index 5621a7ba..7f34fd42 100755 --- a/contrib/system_containers/rhel/run.sh +++ b/contrib/system_containers/rhel/run.sh @@ -5,4 +5,7 @@ PID=$$ LABEL=`tr -d '\000' < /proc/$PID/attr/current` printf %s $LABEL > /proc/self/attr/exec +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + exec /usr/bin/crio --log-level=$LOG_LEVEL From 53bf3f6db5b3793f1fc38293cb292eeb66241f77 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 4 Jan 2018 20:14:39 +0100 Subject: [PATCH 51/59] syscontainer, fedora: read env variables from files Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/fedora/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/system_containers/fedora/run.sh b/contrib/system_containers/fedora/run.sh index 5621a7ba..7f34fd42 100755 --- a/contrib/system_containers/fedora/run.sh +++ b/contrib/system_containers/fedora/run.sh @@ -5,4 +5,7 @@ PID=$$ LABEL=`tr -d '\000' < /proc/$PID/attr/current` printf %s $LABEL > /proc/self/attr/exec +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + exec /usr/bin/crio --log-level=$LOG_LEVEL From 2ba748334dd3ff97f4fd401c57c44dd7d6c2b731 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Thu, 4 Jan 2018 20:14:52 +0100 Subject: [PATCH 52/59] syscontainer, centos: read env variables from files Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/centos/run.sh | 3 +++ 1 file changed, 3 insertions(+) diff --git a/contrib/system_containers/centos/run.sh b/contrib/system_containers/centos/run.sh index 5621a7ba..7f34fd42 100755 --- a/contrib/system_containers/centos/run.sh +++ b/contrib/system_containers/centos/run.sh @@ -5,4 +5,7 @@ PID=$$ LABEL=`tr -d '\000' < /proc/$PID/attr/current` printf %s $LABEL > /proc/self/attr/exec +test -e /etc/sysconfig/crio-storage && source /etc/sysconfig/crio-storage +test -e /etc/sysconfig/crio-network && source /etc/sysconfig/crio-network + exec /usr/bin/crio --log-level=$LOG_LEVEL From c3f20befe2426348de0ddf83623f58e3fd9bc019 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 9 Jan 2018 18:00:43 +0100 Subject: [PATCH 53/59] syscontainer, centos: create /var/run/crio Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/centos/tmpfiles.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/contrib/system_containers/centos/tmpfiles.template b/contrib/system_containers/centos/tmpfiles.template index 015919c4..94472677 100644 --- a/contrib/system_containers/centos/tmpfiles.template +++ b/contrib/system_containers/centos/tmpfiles.template @@ -1,4 +1,4 @@ -d ${RUN_DIRECTORY}/${NAME} - - - - - +d ${RUN_DIRECTORY}/crio - - - - - d /etc/crio - - - - - Z /etc/crio - - - - - d ${STATE_DIRECTORY}/origin - - - - - From 5609cd44b6acdd18ac280b9d2157b2b71acd0835 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 9 Jan 2018 18:00:53 +0100 Subject: [PATCH 54/59] syscontainer, rhel: create /var/run/crio Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/rhel/tmpfiles.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/system_containers/rhel/tmpfiles.template b/contrib/system_containers/rhel/tmpfiles.template index 015919c4..94472677 100644 --- a/contrib/system_containers/rhel/tmpfiles.template +++ b/contrib/system_containers/rhel/tmpfiles.template @@ -1,4 +1,4 @@ -d ${RUN_DIRECTORY}/${NAME} - - - - - +d ${RUN_DIRECTORY}/crio - - - - - d /etc/crio - - - - - Z /etc/crio - - - - - d ${STATE_DIRECTORY}/origin - - - - - From 46e6d109c16ddea21c46f507f468f54266600754 Mon Sep 17 00:00:00 2001 From: Giuseppe Scrivano Date: Tue, 9 Jan 2018 18:01:03 +0100 Subject: [PATCH 55/59] syscontainer, fedora: create /var/run/crio Signed-off-by: Giuseppe Scrivano --- contrib/system_containers/fedora/tmpfiles.template | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/system_containers/fedora/tmpfiles.template b/contrib/system_containers/fedora/tmpfiles.template index 015919c4..94472677 100644 --- a/contrib/system_containers/fedora/tmpfiles.template +++ b/contrib/system_containers/fedora/tmpfiles.template @@ -1,4 +1,4 @@ -d ${RUN_DIRECTORY}/${NAME} - - - - - +d ${RUN_DIRECTORY}/crio - - - - - d /etc/crio - - - - - Z /etc/crio - - - - - d ${STATE_DIRECTORY}/origin - - - - - From 822a6516cfe4494b51876c329348473629395da6 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Fri, 19 Jan 2018 16:12:39 -0800 Subject: [PATCH 56/59] docs: Remove the unused play.png The last consumer was removed in 1bf6d203 (Remove kpod code after repository move, 2017-11-02, #1111). Signed-off-by: W. Trevor King --- docs/play.png | Bin 1672 -> 0 bytes 1 file changed, 0 insertions(+), 0 deletions(-) delete mode 100644 docs/play.png diff --git a/docs/play.png b/docs/play.png deleted file mode 100644 index 9be2868fdf87289472ba06069bb298b19089a3d4..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1672 zcmV;326y?1P)Px#AY({UO#lFTCIA3{ga82g0001h=l}q9FaQARU;qF* zm;eA5aGbhPJOBUy24YJ`L;(K){{a7>y{D4^00ojsL_t(o!|j*fZ(P+8$LHL=yY_Cp zwqqx@;~zM&9qeE?p!`s9LJLG{Rf3X=mWN6pB;b`t-uV}Jqz_H|R8pykP$_8|0Y&^2 zC4fwv#H4ZTxLMcU*lQ>I!}s1Z^Bo?}db0^QSqB6!9UZOikvsB6~m2!nJ2n9eq=50(Rx?4AFZBMr~)z_lLtShUhvW2nC@=UH! 
zjM!6!s4&=qt%Q+;p`;T2{oTz6hC2qkntmAYaxwaJdhz07o=A<7FsubzVFp`B`9au9 z5Q^t{`$swt?YnQ|hQ#**XL60(sdm!wmkF8!Bm|eS)j`3?6u;=mF1ei|GicuC|eOLQ7A#kLBK)4D**>Vc^gGs z3hY}SPaXg8vSo2E9@yFW+!H%eb$%4xw40z8uPjyTU$a&(+*pCYsu5^Mb8@)1r9K{0 zRZ>zVy(uIDV@$qeTN-1kYK-~$LtTLW;f=4Z-w0JzL=~FJ7HR^Q0v3i4)hA-V9o^R3 zmZrOjbGTzax~n_8So-6eXGzIbn?f;EC1dUe29&K3Q6d>{Z%$V9)*_DG*VSZ**i|}F z#Vu#njB%asSu4U=TwWQwyhuc5ZE>GevBnq!=!aik4I_alpb$-^;*|q;vbZH(A4TwE zMxcLr`|A(JG6_GXs!Faft(GU%$f7Zoc>3T#=cWcCHO7eG&woF6;{7S-XAx!Smd&?x zi#5T$ZRscy@;xFdmhAcITsfzzkbp*!G$i9M93H-n`qxumGee=SsR%H%bz@E7zAfp7 zR9&$cRE3U+l#~eWm)N3dL*m85BL^PrbUywzRTv`Lwy9yqww9V=+}@lV>1z!mag~`Z ztn0q6p0I==Z>aZQI6S;cB@z7Tt#ilzG2uFu>m?9n#^|o@uGUmdU?O_#p{_*2x2rX) zf@Fq`;Ctr9!^4j}*i}UxKXz(@844;>86rqG)*pGS|2E}!0uS{xJ-)kVC1k0ZG~9$` zt!!#aKK!#S73$IB~C2In23%X*fz4gEr?{5TK2XuX0wHV zkIxYiphw>vfBocza$MOp5`bYDqkFoaIW+Wb&3B)at6ATeGFVliEGhy( zJt8faMs;<{LPz)Zy!`y$fv(2y)LH9zn=3?rd-u|b_a<*#Uoll|C{=o&t56K2o07kK zV(8f?hg#EhYbjjcK!0v}>EuUKpL{VppUo?j)J+-$sHlo+`{ss6_VoPbVE@kk);p_O zAK1xkt~4>VG%=ma%oXy*P?Z`|@y@n}p}y7!`dV(guRkQXT5f9(wadNyX}jga*N=4a z4XG3{JVuN5+yDRoC3HntbYx+4WjbSWWnpw>05UK!H!UzYEiy1vF)}(cGdeUdD=;!T zFfe2Iqk{kd03~!qSaf7zbY(hiZ)9m^c>ppnF*hwRI4v?TR53C-Gc!6gFe@-JIxsL2 Sr5is00000 Date: Sat, 20 Jan 2018 15:43:18 -0800 Subject: [PATCH 57/59] test/namespaces: Factor out pid_namespace_test helper DRY up this code. The ${parameter:-word} syntax is in POSIX [1]. [1]: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_02 Signed-off-by: W. Trevor King --- test/namespaces.bats | 38 ++++++++------------------------------ 1 file changed, 8 insertions(+), 30 deletions(-) diff --git a/test/namespaces.bats b/test/namespaces.bats index ff9787a9..033cbab2 100644 --- a/test/namespaces.bats +++ b/test/namespaces.bats @@ -6,8 +6,8 @@ function teardown() { cleanup_test } -@test "pod disable shared pid namespace" { - ENABLE_SHARED_PID_NAMESPACE="false" start_crio +function pid_namespace_test() { + start_crio run crictl runs "$TESTDATA"/sandbox_config.json echo "$output" @@ -23,7 +23,7 @@ function teardown() { run crictl exec --sync "$ctr_id" cat /proc/1/cmdline echo "$output" [ "$status" -eq 0 ] - [[ "$output" =~ "redis" ]] + [[ "$output" =~ "${EXPECTED_INIT:-redis}" ]] run crictl stops "$pod_id" echo "$output" @@ -36,32 +36,10 @@ function teardown() { stop_crio } +@test "pod disable shared pid namespace" { + ENABLE_SHARED_PID_NAMESPACE=false pid_namespace_test +} + @test "pod enable shared pid namespace" { - ENABLE_SHARED_PID_NAMESPACE="true" start_crio - - run crictl runs "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - pod_id="$output" - run crictl create "$pod_id" "$TESTDATA"/container_redis.json "$TESTDATA"/sandbox_config.json - echo "$output" - [ "$status" -eq 0 ] - ctr_id="$output" - run crictl start "$ctr_id" - [ "$status" -eq 0 ] - - run crictl exec --sync "$ctr_id" cat /proc/1/cmdline - echo "$output" - [ "$status" -eq 0 ] - [[ "$output" =~ "pause" ]] - - run crictl stops "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - run crictl rms "$pod_id" - echo "$output" - [ "$status" -eq 0 ] - cleanup_ctrs - cleanup_pods - stop_crio + ENABLE_SHARED_PID_NAMESPACE=true EXPECTED_INIT=pause pid_namespace_test } From 8dbc2d1fff076a05dd47a5809d048e20dc772d64 Mon Sep 17 00:00:00 2001 From: "W. Trevor King" Date: Mon, 22 Jan 2018 15:47:33 -0800 Subject: [PATCH 58/59] Makefile: Use 'git diff' to show gofmt changes This makes fixing errors easier. Before this commit, errors looked like [1]: $ make gofmt !!! 
'gofmt -s' needs to be run on the following files: ./lib/config.go make: *** [gofmt] Error 1 But that's not very helpful when your local gofmt thinks the file is fine. With this commit, errors will look like: $ make gofmt find . -name '*.go' ! -path './vendor/*' -exec gofmt -s -w {} \+ git diff --exit-code diff --git a/lib/config.go b/lib/config.go index 1acca8c7..6a63b2b0 100644 --- a/lib/config.go +++ b/lib/config.go @@ -2,7 +2,7 @@ package lib import ( "bytes" -"io/ioutil" + "io/ioutil" "github.com/BurntSushi/toml" "github.com/kubernetes-incubator/cri-o/oci" make: *** [Makefile:68: gofmt] Error 1 (or whatever, I just stuffed in a formatting error for demonstration purposes). Also remove the helper script in favor of direct Makefile calls, because with Git handling difference reporting and exit status, this becomes a simpler check. find's -exec, !, and -path arguments are specified in POSIX [2]. [1]: https://travis-ci.org/kubernetes-incubator/cri-o/jobs/331949394#L1075 [2]: http://pubs.opengroup.org/onlinepubs/9699919799/utilities/find.html Signed-off-by: W. Trevor King --- Makefile | 3 ++- hack/verify-gofmt.sh | 21 --------------------- 2 files changed, 2 insertions(+), 22 deletions(-) delete mode 100755 hack/verify-gofmt.sh diff --git a/Makefile b/Makefile index 86898afc..7cc1a4c1 100644 --- a/Makefile +++ b/Makefile @@ -64,7 +64,8 @@ lint: .gopathok @./.tool/lint gofmt: - @./hack/verify-gofmt.sh + find . -name '*.go' ! -path './vendor/*' -exec gofmt -s -w {} \+ + git diff --exit-code conmon: $(MAKE) -C $@ diff --git a/hack/verify-gofmt.sh b/hack/verify-gofmt.sh deleted file mode 100755 index 5577d1b9..00000000 --- a/hack/verify-gofmt.sh +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash - -set -o errexit -set -o nounset -set -o pipefail - -find_files() { - find . -not \( \ - \( \ - -wholename '*/vendor/*' \ - \) -prune \ - \) -name '*.go' -} - -GOFMT="gofmt -s" -bad_files=$(find_files | xargs $GOFMT -l) -if [[ -n "${bad_files}" ]]; then - echo "!!! '$GOFMT' needs to be run on the following files: " - echo "${bad_files}" - exit 1 -fi From a50f352eb417c431c6614eaa0e031ce52cf4bc42 Mon Sep 17 00:00:00 2001 From: Kei Sawada Date: Wed, 24 Jan 2018 14:47:16 +0900 Subject: [PATCH 59/59] Update tutorial.md to fix a few minor errors Signed-off-by: Kei Sawada --- tutorial.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tutorial.md b/tutorial.md index 693c9c96..5f89ccb8 100644 --- a/tutorial.md +++ b/tutorial.md @@ -300,15 +300,15 @@ cd $GOPATH/src/github.com/kubernetes-incubator/cri-o Next create the Pod and capture the Pod ID for later use: ``` -POD_ID=$(sudo crictl runs test/testdata/sandbox_config.json) +POD_ID=$(sudo crictl runp test/testdata/sandbox_config.json) ``` -> sudo crictl runs test/testdata/sandbox_config.json +> sudo crictl runp test/testdata/sandbox_config.json Use the `crictl` command to get the status of the Pod: ``` -sudo crictl inspects --output table $POD_ID +sudo crictl inspectp --output table $POD_ID ``` Output: