Vendor: Update k8s version
Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
parent dfa93414c5
commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions

1 vendor/k8s.io/kubernetes/plugin/BUILD generated vendored
@@ -34,7 +34,6 @@ filegroup(
"//plugin/pkg/admission/serviceaccount:all-srcs",
"//plugin/pkg/admission/storageclass/default:all-srcs",
"//plugin/pkg/auth:all-srcs",
"//plugin/pkg/client/auth:all-srcs",
"//plugin/pkg/scheduler:all-srcs",
],
tags = ["automanaged"],

8 vendor/k8s.io/kubernetes/plugin/OWNERS generated vendored
@@ -1,4 +1,10 @@
assignees:
reviewers:
- brendandburns
- davidopp
- dchen1107
- lavalamp
- thockin
approvers:
- brendandburns
- davidopp
- dchen1107

6 vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/OWNERS generated vendored
@@ -1,2 +1,4 @@
assignees:
- davidopp
approvers:
- davidopp
reviewers:
- davidopp

7 vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/BUILD generated vendored
@@ -18,8 +18,6 @@ go_library(
"//pkg/client/leaderelection:go_default_library",
"//pkg/client/leaderelection/resourcelock:go_default_library",
"//pkg/client/record:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/util/configz:go_default_library",
"//plugin/cmd/kube-scheduler/app/options:go_default_library",
"//plugin/pkg/scheduler:go_default_library",
@@ -31,8 +29,11 @@ go_library(
"//vendor:github.com/prometheus/client_golang/prometheus",
"//vendor:github.com/spf13/cobra",
"//vendor:github.com/spf13/pflag",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apiserver/pkg/healthz",
"//vendor:k8s.io/apiserver/pkg/server/healthz",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd",
],
)

2 vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options/options.go generated vendored
@@ -66,7 +66,7 @@ func (s *SchedulerServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's annotation with key 'scheduler.alpha.kubernetes.io/name'")
fs.StringVar(&s.SchedulerName, "scheduler-name", s.SchedulerName, "Name of the scheduler, used to select which pods will be processed by this scheduler, based on pod's \"spec.SchedulerName\".")
fs.IntVar(&s.HardPodAffinitySymmetricWeight, "hard-pod-affinity-symmetric-weight", api.DefaultHardPodAffinitySymmetricWeight,
"RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule corresponding "+
"to every RequiredDuringScheduling affinity rule. --hard-pod-affinity-symmetric-weight represents the weight of implicit PreferredDuringScheduling affinity rule.")
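
The only change in this hunk is the --scheduler-name help text: pods are now selected on the typed field rather than the scheduler.alpha.kubernetes.io/name annotation. A minimal Go sketch of the rule that wording implies, assuming the vendored v1 PodSpec of this Kubernetes version carries SchedulerName (the helper itself is illustrative, not part of this commit):

    package example

    import "k8s.io/kubernetes/pkg/api/v1"

    // responsibleForPod is an illustrative helper showing the selection rule
    // described by the new flag help text: match on the typed field instead of
    // the "scheduler.alpha.kubernetes.io/name" annotation.
    func responsibleForPod(pod *v1.Pod, schedulerName string) bool {
        // old behaviour, for comparison:
        // return pod.Annotations["scheduler.alpha.kubernetes.io/name"] == schedulerName
        return pod.Spec.SchedulerName == schedulerName
    }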

9 vendor/k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/server.go generated vendored
@@ -27,16 +27,17 @@ import (
goruntime "runtime"
"strconv"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/healthz"
"k8s.io/apiserver/pkg/server/healthz"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/util/configz"
"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
"k8s.io/kubernetes/plugin/pkg/scheduler"
@@ -101,7 +102,7 @@ func Run(s *options.SchedulerServer) error {
}
// TODO: enable other lock types
rl := &resourcelock.EndpointsLock{
EndpointsMeta: v1.ObjectMeta{
EndpointsMeta: metav1.ObjectMeta{
Namespace: "kube-system",
Name: "kube-scheduler",
},
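
Two moves recur in this file and throughout the rest of the diff: REST and clientcmd helpers now come from k8s.io/client-go, and object metadata comes from apimachinery's meta/v1 package. A sketch of the resulting leader-election lock, using only identifiers visible in the hunk above (the wrapper function is illustrative, not part of the vendored code):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
    )

    // newSchedulerLock repeats the EndpointsLock literal from the hunk above;
    // the metadata type now comes from metav1 rather than the v1 API package.
    func newSchedulerLock() *resourcelock.EndpointsLock {
        return &resourcelock.EndpointsLock{
            EndpointsMeta: metav1.ObjectMeta{
                Namespace: "kube-system",
                Name:      "kube-scheduler",
            },
        }
    }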

6 vendor/k8s.io/kubernetes/plugin/pkg/admission/OWNERS generated vendored
@@ -1,2 +1,4 @@
assignees:
- derekwaynecarr
approvers:
- derekwaynecarr
reviewers:
- derekwaynecarr

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/BUILD generated vendored
@@ -12,7 +12,7 @@ go_library(
name = "go_default_library",
srcs = ["admission.go"],
tags = ["automanaged"],
deps = ["//pkg/admission:go_default_library"],
deps = ["//vendor:k8s.io/apiserver/pkg/admission"],
)

go_test(

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/admit/admission.go generated vendored
@@ -19,7 +19,7 @@ package admit
import (
"io"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
)

func init() {
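
This import swap, from k8s.io/kubernetes/pkg/admission to k8s.io/apiserver/pkg/admission, repeats for every plugin under plugin/pkg/admission in this commit; the call sites keep using the same identifiers. A minimal sketch under that assumption (the helper is illustrative, not part of the vendored code):

    package example

    import (
        "errors"

        // was "k8s.io/kubernetes/pkg/admission"
        "k8s.io/apiserver/pkg/admission"
    )

    // forbidden wraps a rejection the way the plugins below do; only the
    // package path of "admission" changes, not the identifiers used here.
    func forbidden(a admission.Attributes) error {
        return admission.NewForbidden(a, errors.New("rejected"))
    }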

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/BUILD generated vendored
@@ -13,9 +13,9 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -25,9 +25,10 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/admission.go generated vendored
@@ -28,7 +28,7 @@ import (
"io"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)

7 vendor/k8s.io/kubernetes/plugin/pkg/admission/alwayspullimages/admission_test.go generated vendored
@@ -19,8 +19,9 @@ package alwayspullimages
import (
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)
@@ -30,7 +31,7 @@ func TestAdmission(t *testing.T) {
namespace := "test"
handler := &alwaysPullImages{}
pod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
InitContainers: []api.Container{
{Name: "init1", Image: "image"},
@@ -68,7 +69,7 @@ func TestOtherResources(t *testing.T) {
namespace := "testnamespace"
name := "testname"
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "ctr2", Image: "image", ImagePullPolicy: api.PullNever},
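
The test hunks show the other pattern that dominates this update: metadata literals switch from api.ObjectMeta to metav1.ObjectMeta while the surrounding struct stays the same. A sketch that rebuilds the fixture from the hunk above (the wrapper function is illustrative, not part of this commit):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/api"
    )

    // testPod rebuilds the fixture from the hunk above; only the type of
    // ObjectMeta moved (api.ObjectMeta -> metav1.ObjectMeta), the rest of the
    // literal is unchanged.
    func testPod(namespace string) api.Pod {
        return api.Pod{
            ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: namespace},
            Spec: api.PodSpec{
                InitContainers: []api.Container{{Name: "init1", Image: "image"}},
            },
        }
    }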

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/BUILD generated vendored
@@ -16,10 +16,10 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -29,10 +29,10 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/admission.go generated vendored
@@ -22,7 +22,7 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/antiaffinity/admission_test.go generated vendored
@@ -21,7 +21,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)
@@ -250,7 +250,7 @@ func TestOtherResources(t *testing.T) {
namespace := "testnamespace"
name := "testname"
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: namespace},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace},
}
tests := []struct {
name string

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/BUILD generated vendored
@@ -12,7 +12,7 @@ go_library(
name = "go_default_library",
srcs = ["admission.go"],
tags = ["automanaged"],
deps = ["//pkg/admission:go_default_library"],
deps = ["//vendor:k8s.io/apiserver/pkg/admission"],
)

go_test(
@@ -21,8 +21,8 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/admission.go generated vendored
@@ -20,7 +20,7 @@ import (
"errors"
"io"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
)

func init() {

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/deny/admission_test.go generated vendored
@@ -19,7 +19,7 @@ package deny
import (
"testing"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)

9 vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/BUILD generated vendored
@@ -13,13 +13,13 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/genericapiserver/api/rest:go_default_library",
"//pkg/genericapiserver/registry/rest:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -29,12 +29,13 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/genericapiserver/api/rest:go_default_library",
"//pkg/genericapiserver/registry/rest:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/admission.go generated vendored
@@ -22,10 +22,10 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/genericapiserver/api/rest"
"k8s.io/kubernetes/pkg/genericapiserver/registry/rest"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
)

7 vendor/k8s.io/kubernetes/plugin/pkg/admission/exec/admission_test.go generated vendored
@@ -19,12 +19,13 @@ package exec
import (
"testing"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/genericapiserver/api/rest"
"k8s.io/kubernetes/pkg/genericapiserver/registry/rest"
)

func TestAdmission(t *testing.T) {
@@ -207,7 +208,7 @@ func TestDenyExecOnPrivileged(t *testing.T) {
func validPod(name string) *api.Pod {
return &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
Spec: api.PodSpec{
Containers: []api.Container{
{Name: "ctr1", Image: "image"},

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/BUILD generated vendored
@@ -13,10 +13,10 @@ go_library(
srcs = ["gc_admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/meta",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
],
)
@@ -27,11 +27,11 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
],

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission.go generated vendored
@@ -22,8 +22,8 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)

32 vendor/k8s.io/kubernetes/plugin/pkg/admission/gc/gc_admission_test.go generated vendored
@@ -22,9 +22,9 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/apiserver/pkg/authorization/authorizer"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)
@@ -71,7 +71,7 @@ func TestGCAdmission(t *testing.T) {
name: "super-user, create, objectref change",
username: "super",
resource: api.SchemeGroupVersion.WithResource("pods"),
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: true,
},
{
@@ -85,7 +85,7 @@ func TestGCAdmission(t *testing.T) {
name: "non-deleter, create, objectref change",
username: "non-deleter",
resource: api.SchemeGroupVersion.WithResource("pods"),
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: false,
},
{
@@ -99,14 +99,14 @@ func TestGCAdmission(t *testing.T) {
name: "non-pod-deleter, create, objectref change",
username: "non-pod-deleter",
resource: api.SchemeGroupVersion.WithResource("pods"),
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: false,
},
{
name: "non-pod-deleter, create, objectref change, but not a pod",
username: "non-pod-deleter",
resource: api.SchemeGroupVersion.WithResource("not-pods"),
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: true,
},
@@ -122,8 +122,8 @@ func TestGCAdmission(t *testing.T) {
name: "super-user, update, no objectref change two",
username: "super",
resource: api.SchemeGroupVersion.WithResource("pods"),
oldObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
oldObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: true,
},
{
@@ -131,7 +131,7 @@ func TestGCAdmission(t *testing.T) {
username: "super",
resource: api.SchemeGroupVersion.WithResource("pods"),
oldObj: &api.Pod{},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: true,
},
{
@@ -146,8 +146,8 @@ func TestGCAdmission(t *testing.T) {
name: "non-deleter, update, no objectref change two",
username: "non-deleter",
resource: api.SchemeGroupVersion.WithResource("pods"),
oldObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
oldObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: true,
},
{
@@ -155,15 +155,15 @@ func TestGCAdmission(t *testing.T) {
username: "non-deleter",
resource: api.SchemeGroupVersion.WithResource("pods"),
oldObj: &api.Pod{},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: false,
},
{
name: "non-deleter, update, objectref change two",
username: "non-deleter",
resource: api.SchemeGroupVersion.WithResource("pods"),
oldObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}, {Name: "second"}}}},
oldObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}, {Name: "second"}}}},
expectedAllowed: false,
},
{
@@ -179,7 +179,7 @@ func TestGCAdmission(t *testing.T) {
username: "non-pod-deleter",
resource: api.SchemeGroupVersion.WithResource("pods"),
oldObj: &api.Pod{},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: false,
},
{
@@ -187,7 +187,7 @@ func TestGCAdmission(t *testing.T) {
username: "non-pod-deleter",
resource: api.SchemeGroupVersion.WithResource("not-pods"),
oldObj: &api.Pod{},
newObj: &api.Pod{ObjectMeta: api.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
newObj: &api.Pod{ObjectMeta: metav1.ObjectMeta{OwnerReferences: []metav1.OwnerReference{{Name: "first"}}}},
expectedAllowed: true,
},
}
@@ -202,7 +202,7 @@ func TestGCAdmission(t *testing.T) {
operation = admission.Update
}
user := &user.DefaultInfo{Name: tc.username}
attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, api.NamespaceDefault, "foo", tc.resource, "", operation, user)
attributes := admission.NewAttributesRecord(tc.newObj, tc.oldObj, schema.GroupVersionKind{}, metav1.NamespaceDefault, "foo", tc.resource, "", operation, user)

err := gcAdmit.Admit(attributes)
switch {
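
The final hunk also relocates the default-namespace constant to metav1. A sketch of building admission attributes the way this test now does, with placeholder arguments (the helper is illustrative, not part of the vendored code):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/runtime"
        "k8s.io/apimachinery/pkg/runtime/schema"
        "k8s.io/apiserver/pkg/admission"
        "k8s.io/apiserver/pkg/authentication/user"
        "k8s.io/kubernetes/pkg/api"
    )

    // newTestAttributes mirrors the NewAttributesRecord call in the hunk above;
    // the point is that the default namespace is now metav1.NamespaceDefault.
    func newTestAttributes(newObj, oldObj runtime.Object) admission.Attributes {
        return admission.NewAttributesRecord(newObj, oldObj, schema.GroupVersionKind{},
            metav1.NamespaceDefault, "foo", api.SchemeGroupVersion.WithResource("pods"),
            "", admission.Update, &user.DefaultInfo{Name: "super"})
    }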

10 vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/BUILD generated vendored
@@ -17,15 +17,15 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/apis/imagepolicy/install:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/yaml",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/apiserver/pkg/util/cache",
"//vendor:k8s.io/apiserver/pkg/webhook",
"//vendor:k8s.io/client-go/pkg/api/errors",
"//vendor:k8s.io/apiserver/pkg/util/webhook",
"//vendor:k8s.io/client-go/pkg/apis/imagepolicy/install",
"//vendor:k8s.io/client-go/pkg/apis/imagepolicy/v1alpha1",
"//vendor:k8s.io/client-go/rest",
@@ -42,12 +42,12 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/apis/imagepolicy/install:go_default_library",
"//pkg/apis/imagepolicy/v1alpha1:go_default_library",
"//pkg/client/unversioned/clientcmd/api/v1:go_default_library",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/client-go/tools/clientcmd/api/v1",
],
)

6 vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission.go generated vendored
@@ -28,16 +28,16 @@ import (
"github.com/golang/glog"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeschema "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/util/cache"
"k8s.io/apiserver/pkg/webhook"
apierrors "k8s.io/client-go/pkg/api/errors"
"k8s.io/apiserver/pkg/util/webhook"
"k8s.io/client-go/pkg/apis/imagepolicy/v1alpha1"
"k8s.io/client-go/rest"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"

// install the clientgo image policy API for use with api registry

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/imagepolicy/admission_test.go generated vendored
@@ -28,11 +28,11 @@ import (
"testing"
"time"

"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/client-go/tools/clientcmd/api/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/imagepolicy/v1alpha1"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"

"fmt"
"io/ioutil"

10 vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/BUILD generated vendored
@@ -19,11 +19,8 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//vendor:cloud.google.com/go/compute/metadata",
"//vendor:github.com/golang/glog",
"//vendor:github.com/hawkular/hawkular-client-go/metrics",
@@ -32,6 +29,10 @@ go_library(
"//vendor:golang.org/x/oauth2/google",
"//vendor:google.golang.org/api/cloudmonitoring/v2beta2",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/client-go/rest",
"//vendor:k8s.io/client-go/tools/clientcmd",
],
)
@@ -44,10 +45,11 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//vendor:github.com/stretchr/testify/require",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission.go generated vendored
@@ -26,7 +26,7 @@ import (
"github.com/golang/glog"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
)

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/admission_test.go generated vendored
@@ -20,7 +20,8 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/admission"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
)
@@ -57,7 +58,7 @@ func addContainer(pod *api.Pod, name, image string, request api.ResourceList) {
func createPod(name string, image string, request api.ResourceList) *api.Pod {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test-ns"},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test-ns"},
Spec: api.PodSpec{},
}
pod.Spec.Containers = []api.Container{}

7 vendor/k8s.io/kubernetes/plugin/pkg/admission/initialresources/hawkular.go generated vendored
@@ -29,10 +29,11 @@ import (
"github.com/golang/glog"
"github.com/hawkular/hawkular-client-go/metrics"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"

"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)

type hawkularSource struct {
@@ -90,7 +91,7 @@ func (hs *hawkularSource) GetUsagePercentile(kind api.ResourceName, perc int64,
m := make([]metrics.Modifier, len(hs.modifiers), 2+len(hs.modifiers))
copy(m, hs.modifiers)

if namespace != api.NamespaceAll {
if namespace != metav1.NamespaceAll {
m = append(m, metrics.Tenant(namespace))
}

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/BUILD generated vendored
@@ -16,7 +16,6 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
@@ -25,9 +24,11 @@ go_library(
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor:github.com/hashicorp/golang-lru",
"//vendor:k8s.io/apimachinery/pkg/api/meta",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -37,7 +38,6 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
@@ -48,6 +48,7 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/admission.go generated vendored
@@ -26,10 +26,11 @@ import (
lru "github.com/hashicorp/golang-lru"

"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -112,7 +113,7 @@ func (l *limitRanger) Admit(a admission.Attributes) (err error) {
// If there is already in-flight List() for a given namespace, we should wait until
// it is finished and cache is updated instead of doing the same, also to avoid
// throttling - see #22422 for details.
liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(api.ListOptions{})
liveList, err := l.client.Core().LimitRanges(a.GetNamespace()).List(metav1.ListOptions{})
if err != nil {
return admission.NewForbidden(a, err)
}
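
Besides the import move, List calls against the generated clientset now take metav1.ListOptions, as in the Admit hunk above. A sketch under the assumption that the internal clientset's List returns *api.LimitRangeList (the wrapper and its error handling are illustrative, not part of the vendored code):

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/kubernetes/pkg/api"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    )

    // liveLimitRanges repeats the live List() call from the hunk above; the
    // options struct comes from metav1 rather than pkg/api.
    func liveLimitRanges(client clientset.Interface, namespace string) ([]api.LimitRange, error) {
        list, err := client.Core().LimitRanges(namespace).List(metav1.ListOptions{})
        if err != nil {
            return nil, err
        }
        return list.Items, nil
    }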

12 vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/admission_test.go generated vendored
@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -64,7 +64,7 @@ func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequ
// createLimitRange creates a limit range with the specified data
func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRequest, maxLimitRequestRatio api.ResourceList) api.LimitRange {
return api.LimitRange{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: "test",
},
@@ -85,7 +85,7 @@ func createLimitRange(limitType api.LimitType, min, max, defaultLimit, defaultRe
func validLimitRange() api.LimitRange {
return api.LimitRange{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: "test",
},
@@ -110,7 +110,7 @@ func validLimitRange() api.LimitRange {
func validLimitRangeNoDefaults() api.LimitRange {
return api.LimitRange{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "abc",
Namespace: "test",
},
@@ -133,7 +133,7 @@ func validLimitRangeNoDefaults() api.LimitRange {
func validPod(name string, numContainers int, resources api.ResourceRequirements) api.Pod {
pod := api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
Spec: api.PodSpec{},
}
pod.Spec.Containers = make([]api.Container, 0, numContainers)
@@ -602,7 +602,7 @@ func newHandlerForTest(c clientset.Interface) (admission.Interface, informers.Sh
func validPersistentVolumeClaim(name string, resources api.ResourceRequirements) api.PersistentVolumeClaim {
pvc := api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
Spec: api.PersistentVolumeClaimSpec{
Resources: resources,
},

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/limitranger/interfaces.go generated vendored
@@ -18,7 +18,7 @@ package limitranger
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/BUILD generated vendored
@@ -13,13 +13,14 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -29,7 +30,6 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
@@ -40,6 +40,7 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/autoprovision/admission.go generated vendored
@@ -21,7 +21,8 @@ import (
"io"

"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/kubernetes/pkg/admission"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -59,7 +60,7 @@ func (p *provision) Admit(a admission.Attributes) (err error) {
return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
}
namespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: a.GetNamespace(),
Namespace: "",
},

@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
@@ -55,7 +55,7 @@ func newMockClientForTest(namespaces []string) *fake.Clientset {
}
for i, ns := range namespaces {
namespaceList.Items = append(namespaceList.Items, api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
ResourceVersion: fmt.Sprintf("%d", i),
},
@@ -69,7 +69,7 @@ func newMockClientForTest(namespaces []string) *fake.Clientset {
// newPod returns a new pod for the specified namespace
func newPod(namespace string) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/BUILD generated vendored
@@ -13,7 +13,6 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
@@ -21,6 +20,7 @@ go_library(
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -30,7 +30,6 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
@@ -40,6 +39,7 @@ go_test(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

4 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission.go generated vendored
@@ -22,7 +22,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
@@ -61,7 +61,7 @@ func (e *exists) Admit(a admission.Attributes) (err error) {
return admission.NewForbidden(a, fmt.Errorf("not yet ready to handle request"))
}
namespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: a.GetNamespace(),
Namespace: "",
},

6 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/exists/admission_test.go generated vendored
@@ -24,7 +24,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
@@ -54,7 +54,7 @@ func newMockClientForTest(namespaces []string) *fake.Clientset {
}
for i, ns := range namespaces {
namespaceList.Items = append(namespaceList.Items, api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: ns,
ResourceVersion: fmt.Sprintf("%d", i),
},
@@ -68,7 +68,7 @@ func newMockClientForTest(namespaces []string) *fake.Clientset {
// newPod returns a new pod for the specified namespace
func newPod(namespace string) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},

8 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle/BUILD generated vendored
@@ -13,18 +13,18 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/util/clock:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/apiserver/pkg/util/cache",
"//vendor:k8s.io/client-go/util/clock",
],
)
@@ -34,18 +34,18 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/client/testing/core:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/util/clock:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apiserver/pkg/admission",
"//vendor:k8s.io/client-go/util/clock",
],
)

8 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle/admission.go generated vendored
@@ -26,14 +26,14 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
utilcache "k8s.io/apiserver/pkg/util/cache"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/controller/informers"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/util/clock"
)

const (
@@ -51,7 +51,7 @@ const (
func init() {
admission.RegisterPlugin(PluginName, func(config io.Reader) (admission.Interface, error) {
return NewLifecycle(sets.NewString(api.NamespaceDefault, api.NamespaceSystem))
return NewLifecycle(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem))
})
}
@@ -76,7 +76,7 @@ var _ = kubeapiserveradmission.WantsInternalClientSet(&lifecycle{})
func makeNamespaceKey(namespace string) *api.Namespace {
return &api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: namespace,
Namespace: "",
},
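
Two more relocations appear here: the reserved namespace names are now metav1 constants, and the clock helpers come from k8s.io/client-go/util/clock instead of pkg/util/clock. A sketch combining both, using only identifiers from these hunks and the test file that follows (helper names are illustrative, not part of the vendored code):

    package example

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/sets"
        "k8s.io/client-go/util/clock" // was "k8s.io/kubernetes/pkg/util/clock"
    )

    // immortalNamespaces spells the protected namespaces with the relocated
    // metav1 constants, exactly as the init() hunk above now does.
    func immortalNamespaces() sets.String {
        return sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem)
    }

    // newTestClock uses the clock package from client-go, mirroring the
    // clock.NewFakeClock call in the admission_test.go hunks that follow.
    func newTestClock() clock.Clock {
        return clock.NewFakeClock(time.Now())
    }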

14 vendor/k8s.io/kubernetes/plugin/pkg/admission/namespace/lifecycle/admission_test.go generated vendored
@@ -25,14 +25,14 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/client-go/util/clock"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/informers"
kubeadmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
"k8s.io/kubernetes/pkg/util/clock"
)

// newHandlerForTest returns a configured handler for testing.
@@ -43,7 +43,7 @@ func newHandlerForTest(c clientset.Interface) (admission.Interface, informers.Sh
// newHandlerForTestWithClock returns a configured handler for testing.
func newHandlerForTestWithClock(c clientset.Interface, cacheClock clock.Clock) (admission.Interface, informers.SharedInformerFactory, error) {
f := informers.NewSharedInformerFactory(nil, c, 5*time.Minute)
handler, err := newLifecycleWithClock(sets.NewString(api.NamespaceDefault, api.NamespaceSystem), cacheClock)
handler, err := newLifecycleWithClock(sets.NewString(metav1.NamespaceDefault, metav1.NamespaceSystem), cacheClock)
if err != nil {
return nil, f, err
}
@@ -65,7 +65,7 @@ func newMockClientForTest(namespaces map[string]api.NamespacePhase) *fake.Client
index := 0
for name, phase := range namespaces {
namespaceList.Items = append(namespaceList.Items, api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: name,
ResourceVersion: fmt.Sprintf("%d", index),
},
@@ -83,7 +83,7 @@ func newMockClientForTest(namespaces map[string]api.NamespacePhase) *fake.Client
// newPod returns a new pod for the specified namespace
func newPod(namespace string) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{Name: "123", Namespace: namespace},
ObjectMeta: metav1.ObjectMeta{Name: "123", Namespace: namespace},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image"}},
@@ -168,7 +168,7 @@ func TestAdmissionNamespaceTerminating(t *testing.T) {
}

// verify delete of namespace default can never proceed
err = handler.Admit(admission.NewAttributesRecord(nil, nil, api.Kind("Namespace").WithVersion("version"), "", api.NamespaceDefault, api.Resource("namespaces").WithVersion("version"), "", admission.Delete, nil))
err = handler.Admit(admission.NewAttributesRecord(nil, nil, api.Kind("Namespace").WithVersion("version"), "", metav1.NamespaceDefault, api.Resource("namespaces").WithVersion("version"), "", admission.Delete, nil))
if err == nil {
t.Errorf("Expected an error that this namespace can never be deleted")
}
@@ -188,7 +188,7 @@ func TestAdmissionNamespaceForceLiveLookup(t *testing.T) {
mockClient := newMockClientForTest(phases)
mockClient.AddReactor("get", "namespaces", func(action core.Action) (bool, runtime.Object, error) {
getCalls++
return true, &api.Namespace{ObjectMeta: api.ObjectMeta{Name: namespace}, Status: api.NamespaceStatus{Phase: phases[namespace]}}, nil
return true, &api.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}, Status: api.NamespaceStatus{Phase: phases[namespace]}}, nil
})

fakeClock := clock.NewFakeClock(time.Now())

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/BUILD generated vendored
@@ -16,13 +16,13 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/volume:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -32,10 +32,11 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

2 vendor/k8s.io/kubernetes/plugin/pkg/admission/persistentvolume/label/admission.go generated vendored
@@ -22,7 +22,7 @@ import (
"sync"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"

@@ -21,8 +21,9 @@ import (
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
)
@@ -79,7 +80,7 @@ func TestAdmission(t *testing.T) {
pvHandler := NewPersistentVolumeLabel()
handler := admission.NewChainHandler(pvHandler)
ignoredPV := api.PersistentVolume{
ObjectMeta: api.ObjectMeta{Name: "noncloud", Namespace: "myns"},
ObjectMeta: metav1.ObjectMeta{Name: "noncloud", Namespace: "myns"},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
HostPath: &api.HostPathVolumeSource{
@@ -89,7 +90,7 @@ func TestAdmission(t *testing.T) {
},
}
awsPV := api.PersistentVolume{
ObjectMeta: api.ObjectMeta{Name: "noncloud", Namespace: "myns"},
ObjectMeta: metav1.ObjectMeta{Name: "noncloud", Namespace: "myns"},
Spec: api.PersistentVolumeSpec{
PersistentVolumeSource: api.PersistentVolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{

5 vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/BUILD generated vendored
@@ -13,7 +13,6 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
@@ -24,6 +23,7 @@ go_library(
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/util/yaml",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
@@ -33,14 +33,15 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
"//pkg/controller/informers:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
4 vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/admission.go generated vendored
@@ -27,7 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

@@ -115,7 +115,7 @@ func (p *podNodeSelector) Admit(a admission.Attributes) error {
var namespace *api.Namespace

namespaceObj, exists, err := p.namespaceInformer.GetStore().Get(&api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: nsName,
Namespace: "",
},
7 vendor/k8s.io/kubernetes/plugin/pkg/admission/podnodeselector/admission_test.go generated vendored
@@ -20,9 +20,10 @@ import (
"testing"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"

@@ -33,7 +34,7 @@ import (
// TestPodAdmission verifies various scenarios involving pod/namespace/global node label selectors
func TestPodAdmission(t *testing.T) {
namespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: "testNamespace",
Namespace: "",
},

@@ -47,7 +48,7 @@ func TestPodAdmission(t *testing.T) {
informerFactory.Start(wait.NeverStop)

pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "testPod", Namespace: "testNamespace"},
ObjectMeta: metav1.ObjectMeta{Name: "testPod", Namespace: "testNamespace"},
}

tests := []struct {
7 vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/BUILD generated vendored
@@ -18,9 +18,7 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",

@@ -32,11 +30,13 @@ go_library(
"//vendor:github.com/golang/glog",
"//vendor:github.com/hashicorp/golang-lru",
"//vendor:k8s.io/apimachinery/pkg/api/meta",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

@@ -46,7 +46,6 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/client/cache:go_default_library",

@@ -56,8 +55,10 @@ go_test(
"//pkg/quota/generic:go_default_library",
"//pkg/quota/install:go_default_library",
"//vendor:github.com/hashicorp/golang-lru",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
2 vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission.go generated vendored
@@ -21,7 +21,7 @@ import (
"io"
"time"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
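Note: every plugin in this directory now builds against k8s.io/apiserver/pkg/admission instead of k8s.io/kubernetes/pkg/admission; the interface surface is unchanged, as the untouched Admit signatures in these hunks show. A rough, hypothetical sketch of a plugin written against the relocated package (the noopPlugin name and its behavior are invented for illustration):

package example

import (
	"fmt"
	"io"

	"k8s.io/apiserver/pkg/admission"
)

// noopPlugin is a hypothetical admission plugin wired against the
// relocated package; it handles create and update and admits everything.
type noopPlugin struct {
	*admission.Handler
}

// NewNoopPlugin follows the usual constructor shape: plugin configuration
// arrives as an io.Reader, which this sketch ignores.
func NewNoopPlugin(config io.Reader) (admission.Interface, error) {
	return &noopPlugin{Handler: admission.NewHandler(admission.Create, admission.Update)}, nil
}

// Admit matches the signature used by the vendored plugins above,
// e.g. func (p *podNodeSelector) Admit(a admission.Attributes) error.
func (p *noopPlugin) Admit(a admission.Attributes) error {
	if a == nil {
		return fmt.Errorf("nil admission attributes")
	}
	return nil
}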
43
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission_test.go
generated
vendored
43
vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/admission_test.go
generated
vendored
|
@ -24,9 +24,10 @@ import (
|
|||
|
||||
lru "github.com/hashicorp/golang-lru"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
|
@ -57,7 +58,7 @@ func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequ
|
|||
|
||||
func validPod(name string, numContainers int, resources api.ResourceRequirements) *api.Pod {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
|
||||
Spec: api.PodSpec{},
|
||||
}
|
||||
pod.Spec.Containers = make([]api.Container, 0, numContainers)
|
||||
|
@ -72,7 +73,7 @@ func validPod(name string, numContainers int, resources api.ResourceRequirements
|
|||
|
||||
func validPersistentVolumeClaim(name string, resources api.ResourceRequirements) *api.PersistentVolumeClaim {
|
||||
return &api.PersistentVolumeClaim{
|
||||
ObjectMeta: api.ObjectMeta{Name: name, Namespace: "test"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: "test"},
|
||||
Spec: api.PersistentVolumeClaimSpec{
|
||||
Resources: resources,
|
||||
},
|
||||
|
@ -184,7 +185,7 @@ func TestAdmissionIgnoresSubresources(t *testing.T) {
|
|||
// TestAdmitBelowQuotaLimit verifies that a pod when created has its usage reflected on the quota
|
||||
func TestAdmitBelowQuotaLimit(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
|
@ -264,7 +265,7 @@ func TestAdmitBelowQuotaLimit(t *testing.T) {
|
|||
func TestAdmitHandlesOldObjects(t *testing.T) {
|
||||
// in this scenario, the old quota was based on a service type=loadbalancer
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceServices: resource.MustParse("10"),
|
||||
|
@ -298,11 +299,11 @@ func TestAdmitHandlesOldObjects(t *testing.T) {
|
|||
|
||||
// old service was a load balancer, but updated version is a node port.
|
||||
existingService := &api.Service{
|
||||
ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: "1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: "1"},
|
||||
Spec: api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
|
||||
}
|
||||
newService := &api.Service{
|
||||
ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
|
||||
Spec: api.ServiceSpec{
|
||||
Type: api.ServiceTypeNodePort,
|
||||
Ports: []api.ServicePort{{Port: 1234}},
|
||||
|
@ -360,7 +361,7 @@ func TestAdmitHandlesOldObjects(t *testing.T) {
|
|||
func TestAdmitHandlesCreatingUpdates(t *testing.T) {
|
||||
// in this scenario, there is an existing service
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceServices: resource.MustParse("10"),
|
||||
|
@ -394,11 +395,11 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) {
|
|||
|
||||
// old service didn't exist, so this update is actually a create
|
||||
oldService := &api.Service{
|
||||
ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: ""},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test", ResourceVersion: ""},
|
||||
Spec: api.ServiceSpec{Type: api.ServiceTypeLoadBalancer},
|
||||
}
|
||||
newService := &api.Service{
|
||||
ObjectMeta: api.ObjectMeta{Name: "service", Namespace: "test"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "service", Namespace: "test"},
|
||||
Spec: api.ServiceSpec{
|
||||
Type: api.ServiceTypeNodePort,
|
||||
Ports: []api.ServicePort{{Port: 1234}},
|
||||
|
@ -455,7 +456,7 @@ func TestAdmitHandlesCreatingUpdates(t *testing.T) {
|
|||
// TestAdmitExceedQuotaLimit verifies that if a pod exceeded allowed usage that its rejected during admission.
|
||||
func TestAdmitExceedQuotaLimit(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
|
@ -496,7 +497,7 @@ func TestAdmitExceedQuotaLimit(t *testing.T) {
|
|||
// We ensure that a pod that does not specify a memory limit that it fails in admission.
|
||||
func TestAdmitEnforceQuotaConstraints(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
|
@ -544,7 +545,7 @@ func TestAdmitEnforceQuotaConstraints(t *testing.T) {
|
|||
// TestAdmitPodInNamespaceWithoutQuota ensures that if a namespace has no quota, that a pod can get in
|
||||
func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "other", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceCPU: resource.MustParse("3"),
|
||||
|
@ -595,7 +596,7 @@ func TestAdmitPodInNamespaceWithoutQuota(t *testing.T) {
|
|||
// It ensures that the terminating quota is incremented, and the non-terminating quota is not.
|
||||
func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) {
|
||||
resourceQuotaNonTerminating := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-non-terminating", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-non-terminating", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotTerminating},
|
||||
},
|
||||
|
@ -613,7 +614,7 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) {
|
|||
},
|
||||
}
|
||||
resourceQuotaTerminating := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-terminating", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-terminating", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeTerminating},
|
||||
},
|
||||
|
@ -707,7 +708,7 @@ func TestAdmitBelowTerminatingQuotaLimit(t *testing.T) {
|
|||
// It verifies that best effort pods are properly scoped to the best effort quota document.
|
||||
func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) {
|
||||
resourceQuotaBestEffort := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
|
@ -721,7 +722,7 @@ func TestAdmitBelowBestEffortQuotaLimit(t *testing.T) {
|
|||
},
|
||||
}
|
||||
resourceQuotaNotBestEffort := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-not-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-not-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeNotBestEffort},
|
||||
},
|
||||
|
@ -812,7 +813,7 @@ func removeListWatch(in []testcore.Action) []testcore.Action {
|
|||
// guaranteed pod.
|
||||
func TestAdmitBestEffortQuotaLimitIgnoresBurstable(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota-besteffort", Namespace: "test", ResourceVersion: "124"},
|
||||
Spec: api.ResourceQuotaSpec{
|
||||
Scopes: []api.ResourceQuotaScope{api.ResourceQuotaScopeBestEffort},
|
||||
},
|
||||
|
@ -898,7 +899,7 @@ func TestHasUsageStats(t *testing.T) {
|
|||
func TestAdmissionSetsMissingNamespace(t *testing.T) {
|
||||
namespace := "test"
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: namespace, ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePods: resource.MustParse("3"),
|
||||
|
@ -953,7 +954,7 @@ func TestAdmissionSetsMissingNamespace(t *testing.T) {
|
|||
// TestAdmitRejectsNegativeUsage verifies that usage for any measured resource cannot be negative.
|
||||
func TestAdmitRejectsNegativeUsage(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourcePersistentVolumeClaims: resource.MustParse("3"),
|
||||
|
@ -998,7 +999,7 @@ func TestAdmitRejectsNegativeUsage(t *testing.T) {
|
|||
// TestAdmitWhenUnrelatedResourceExceedsQuota verifies that if resource X exceeds quota, it does not prohibit resource Y from admission.
|
||||
func TestAdmitWhenUnrelatedResourceExceedsQuota(t *testing.T) {
|
||||
resourceQuota := &api.ResourceQuota{
|
||||
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "quota", Namespace: "test", ResourceVersion: "124"},
|
||||
Status: api.ResourceQuotaStatus{
|
||||
Hard: api.ResourceList{
|
||||
api.ResourceServices: resource.MustParse("3"),
|
||||
|
|
2 vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/controller.go generated vendored
@@ -29,7 +29,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/util/workqueue"
21 vendor/k8s.io/kubernetes/plugin/pkg/admission/resourcequota/resource_access.go generated vendored
@@ -23,14 +23,13 @@ import (
"github.com/golang/glog"
lru "github.com/hashicorp/golang-lru"

clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/storage/etcd"
)

@@ -74,15 +73,11 @@ func newQuotaAccessor(client clientset.Interface) (*quotaAccessor, error) {
return nil, err
}
lw := &cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return client.Core().ResourceQuotas(api.NamespaceAll).List(internalOptions)
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.Core().ResourceQuotas(metav1.NamespaceAll).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return client.Core().ResourceQuotas(api.NamespaceAll).Watch(internalOptions)
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.Core().ResourceQuotas(metav1.NamespaceAll).Watch(options)
},
}
indexer, reflector := cache.NewNamespaceKeyedIndexerAndReflector(lw, &api.ResourceQuota{}, 0)

@@ -141,7 +136,7 @@ func (e *quotaAccessor) checkCache(quota *api.ResourceQuota) *api.ResourceQuota
func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error) {
// determine if there are any quotas in this namespace
// if there are no quotas, we don't need to do anything
items, err := e.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: ""}})
items, err := e.indexer.Index("namespace", &api.ResourceQuota{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: ""}})
if err != nil {
return nil, fmt.Errorf("error resolving quota.")
}

@@ -155,7 +150,7 @@ func (e *quotaAccessor) GetQuotas(namespace string) ([]api.ResourceQuota, error)
// If there is already in-flight List() for a given namespace, we should wait until
// it is finished and cache is updated instead of doing the same, also to avoid
// throttling - see #22422 for details.
liveList, err := e.client.Core().ResourceQuotas(namespace).List(api.ListOptions{})
liveList, err := e.client.Core().ResourceQuotas(namespace).List(metav1.ListOptions{})
if err != nil {
return nil, err
}
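Note: the newQuotaAccessor hunk above is the pattern used for every ListWatch in this commit — the v1.ListOptions parameter and the Convert_v1_ListOptions_To_api_ListOptions round trip are dropped, and the metav1.ListOptions handed in by the reflector is passed straight to the internal clientset. A condensed sketch of the post-change shape using the same packages this file imports (the newQuotaListWatch helper name is invented):

package resourcequota

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
)

// newQuotaListWatch shows the simplified construction: no conversion shim,
// the options received from the reflector are forwarded as-is to List and Watch.
func newQuotaListWatch(client clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
			return client.Core().ResourceQuotas(metav1.NamespaceAll).List(options)
		},
		WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
			return client.Core().ResourceQuotas(metav1.NamespaceAll).Watch(options)
		},
	}
}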
7
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD
generated
vendored
7
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/BUILD
generated
vendored
|
@ -13,9 +13,7 @@ go_library(
|
|||
srcs = ["admission.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/admission:go_default_library",
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/client/cache:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
|
@ -27,9 +25,11 @@ go_library(
|
|||
"//pkg/util/maps:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/validation/field",
|
||||
"//vendor:k8s.io/apimachinery/pkg/watch",
|
||||
"//vendor:k8s.io/apiserver/pkg/admission",
|
||||
"//vendor:k8s.io/apiserver/pkg/authentication/user",
|
||||
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
|
||||
],
|
||||
|
@ -41,7 +41,6 @@ go_test(
|
|||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/admission:go_default_library",
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/apis/extensions:go_default_library",
|
||||
"//pkg/client/cache:go_default_library",
|
||||
|
@ -50,8 +49,10 @@ go_test(
|
|||
"//pkg/security/podsecuritypolicy/seccomp:go_default_library",
|
||||
"//pkg/security/podsecuritypolicy/util:go_default_library",
|
||||
"//vendor:github.com/stretchr/testify/assert",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/diff",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apiserver/pkg/admission",
|
||||
"//vendor:k8s.io/apiserver/pkg/authentication/user",
|
||||
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
|
||||
],
|
||||
|
|
16
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go
generated
vendored
16
vendor/k8s.io/kubernetes/plugin/pkg/admission/security/podsecuritypolicy/admission.go
generated
vendored
|
@ -24,14 +24,14 @@ import (
|
|||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
|
@ -108,15 +108,11 @@ func (a *podSecurityPolicyPlugin) SetInternalClientSet(client internalclientset.
|
|||
a.store = cache.NewStore(cache.MetaNamespaceKeyFunc)
|
||||
a.reflector = cache.NewReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
internalOptions := api.ListOptions{}
|
||||
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
|
||||
return client.Extensions().PodSecurityPolicies().List(internalOptions)
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
return client.Extensions().PodSecurityPolicies().List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
internalOptions := api.ListOptions{}
|
||||
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
|
||||
return client.Extensions().PodSecurityPolicies().Watch(internalOptions)
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
return client.Extensions().PodSecurityPolicies().Watch(options)
|
||||
},
|
||||
},
|
||||
&extensions.PodSecurityPolicy{},
|
||||
|
|
|
@ -24,11 +24,12 @@ import (
|
|||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/diff"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
kadmission "k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/apiserver/pkg/authorization/authorizer"
|
||||
kadmission "k8s.io/kubernetes/pkg/admission"
|
||||
kapi "k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
|
@ -172,7 +173,7 @@ func TestAdmitSeccomp(t *testing.T) {
|
|||
psp := restrictivePSP()
|
||||
psp.Annotations = v.pspAnnotations
|
||||
pod := &kapi.Pod{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: v.podAnnotations,
|
||||
},
|
||||
Spec: kapi.PodSpec{
|
||||
|
@ -1457,7 +1458,7 @@ func TestCreateProvidersFromConstraints(t *testing.T) {
|
|||
"valid psp": {
|
||||
psp: func() *extensions.PodSecurityPolicy {
|
||||
return &extensions.PodSecurityPolicy{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "valid psp",
|
||||
},
|
||||
Spec: extensions.PodSecurityPolicySpec{
|
||||
|
@ -1480,7 +1481,7 @@ func TestCreateProvidersFromConstraints(t *testing.T) {
|
|||
"bad psp strategy options": {
|
||||
psp: func() *extensions.PodSecurityPolicy {
|
||||
return &extensions.PodSecurityPolicy{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "bad psp user options",
|
||||
},
|
||||
Spec: extensions.PodSecurityPolicySpec{
|
||||
|
@ -1650,7 +1651,7 @@ func TestGetMatchingPolicies(t *testing.T) {
|
|||
|
||||
func restrictivePSP() *extensions.PodSecurityPolicy {
|
||||
return &extensions.PodSecurityPolicy{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "restrictive",
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
|
@ -1685,7 +1686,7 @@ func restrictivePSP() *extensions.PodSecurityPolicy {
|
|||
|
||||
func createNamespaceForTest() *kapi.Namespace {
|
||||
return &kapi.Namespace{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default",
|
||||
},
|
||||
}
|
||||
|
@ -1693,7 +1694,7 @@ func createNamespaceForTest() *kapi.Namespace {
|
|||
|
||||
func createSAForTest() *kapi.ServiceAccount {
|
||||
return &kapi.ServiceAccount{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
Name: "default",
|
||||
},
|
||||
|
@ -1705,7 +1706,7 @@ func createSAForTest() *kapi.ServiceAccount {
|
|||
// psp when defaults are filled in.
|
||||
func goodPod() *kapi.Pod {
|
||||
return &kapi.Pod{
|
||||
ObjectMeta: kapi.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
Spec: kapi.PodSpec{
|
||||
|
|
4 vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/BUILD generated vendored
@@ -13,9 +13,9 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

@@ -25,8 +25,8 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
2 vendor/k8s.io/kubernetes/plugin/pkg/admission/securitycontext/scdeny/admission.go generated vendored
@@ -21,7 +21,7 @@ import (
"io"

apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)

@@ -19,7 +19,7 @@ package scdeny
import (
"testing"

"k8s.io/kubernetes/pkg/admission"
"k8s.io/apiserver/pkg/admission"
"k8s.io/kubernetes/pkg/api"
)
8
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD
generated
vendored
8
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/BUILD
generated
vendored
|
@ -16,21 +16,20 @@ go_library(
|
|||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/admission:go_default_library",
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/cache:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset:go_default_library",
|
||||
"//pkg/fields:go_default_library",
|
||||
"//pkg/kubeapiserver/admission:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//pkg/serviceaccount:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/fields",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime",
|
||||
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apimachinery/pkg/watch",
|
||||
"//vendor:k8s.io/apiserver/pkg/admission",
|
||||
"//vendor:k8s.io/apiserver/pkg/storage/names",
|
||||
],
|
||||
)
|
||||
|
@ -41,12 +40,13 @@ go_test(
|
|||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/admission:go_default_library",
|
||||
"//pkg/api:go_default_library",
|
||||
"//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
|
||||
"//pkg/kubelet/types:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apiserver/pkg/admission",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
37
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go
generated
vendored
37
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission.go
generated
vendored
|
@ -25,17 +25,16 @@ import (
|
|||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/fields"
|
||||
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
|
||||
kubelet "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/serviceaccount"
|
||||
|
@ -108,15 +107,11 @@ func (a *serviceAccount) SetInternalClientSet(cl internalclientset.Interface) {
|
|||
a.client = cl
|
||||
a.serviceAccounts, a.serviceAccountsReflector = cache.NewNamespaceKeyedIndexerAndReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
internalOptions := api.ListOptions{}
|
||||
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
|
||||
return cl.Core().ServiceAccounts(api.NamespaceAll).List(internalOptions)
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
return cl.Core().ServiceAccounts(metav1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
internalOptions := api.ListOptions{}
|
||||
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
|
||||
return cl.Core().ServiceAccounts(api.NamespaceAll).Watch(internalOptions)
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
return cl.Core().ServiceAccounts(metav1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.ServiceAccount{},
|
||||
|
@ -126,17 +121,13 @@ func (a *serviceAccount) SetInternalClientSet(cl internalclientset.Interface) {
|
|||
tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
|
||||
a.secrets, a.secretsReflector = cache.NewNamespaceKeyedIndexerAndReflector(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
internalOptions := api.ListOptions{}
|
||||
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
|
||||
internalOptions.FieldSelector = tokenSelector
|
||||
return cl.Core().Secrets(api.NamespaceAll).List(internalOptions)
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
options.FieldSelector = tokenSelector.String()
|
||||
return cl.Core().Secrets(metav1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
internalOptions := api.ListOptions{}
|
||||
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
|
||||
internalOptions.FieldSelector = tokenSelector
|
||||
return cl.Core().Secrets(api.NamespaceAll).Watch(internalOptions)
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
options.FieldSelector = tokenSelector.String()
|
||||
return cl.Core().Secrets(metav1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Secret{},
|
||||
|
@ -265,7 +256,7 @@ func (s *serviceAccount) enforceMountableSecrets(serviceAccount *api.ServiceAcco
|
|||
|
||||
// getServiceAccount returns the ServiceAccount for the given namespace and name if it exists
|
||||
func (s *serviceAccount) getServiceAccount(namespace string, name string) (*api.ServiceAccount, error) {
|
||||
key := &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: namespace}}
|
||||
key := &api.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}}
|
||||
index, err := s.serviceAccounts.Index("namespace", key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -327,7 +318,7 @@ func (s *serviceAccount) getReferencedServiceAccountToken(serviceAccount *api.Se
|
|||
|
||||
// getServiceAccountTokens returns all ServiceAccountToken secrets for the given ServiceAccount
|
||||
func (s *serviceAccount) getServiceAccountTokens(serviceAccount *api.ServiceAccount) ([]*api.Secret, error) {
|
||||
key := &api.Secret{ObjectMeta: api.ObjectMeta{Namespace: serviceAccount.Namespace}}
|
||||
key := &api.Secret{ObjectMeta: metav1.ObjectMeta{Namespace: serviceAccount.Namespace}}
|
||||
index, err := s.secrets.Index("namespace", key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
|
37
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go
generated
vendored
37
vendor/k8s.io/kubernetes/plugin/pkg/admission/serviceaccount/admission_test.go
generated
vendored
|
@ -22,8 +22,9 @@ import (
|
|||
"testing"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
|
||||
kubelet "k8s.io/kubernetes/pkg/kubelet/types"
|
||||
|
@ -69,7 +70,7 @@ func TestIgnoresNonPodObject(t *testing.T) {
|
|||
|
||||
func TestIgnoresMirrorPod(t *testing.T) {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
kubelet.ConfigMirrorAnnotationKey: "true",
|
||||
},
|
||||
|
@ -89,7 +90,7 @@ func TestIgnoresMirrorPod(t *testing.T) {
|
|||
|
||||
func TestRejectsMirrorPodWithServiceAccount(t *testing.T) {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
kubelet.ConfigMirrorAnnotationKey: "true",
|
||||
},
|
||||
|
@ -107,7 +108,7 @@ func TestRejectsMirrorPodWithServiceAccount(t *testing.T) {
|
|||
|
||||
func TestRejectsMirrorPodWithSecretVolumes(t *testing.T) {
|
||||
pod := &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
kubelet.ConfigMirrorAnnotationKey: "true",
|
||||
},
|
||||
|
@ -135,7 +136,7 @@ func TestAssignsDefaultServiceAccountAndToleratesMissingAPIToken(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -162,7 +163,7 @@ func TestAssignsDefaultServiceAccountAndRejectsMissingAPIToken(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -181,7 +182,7 @@ func TestFetchesUncachedServiceAccount(t *testing.T) {
|
|||
|
||||
// Build a test client that the admission plugin can use to look up the service account missing from its cache
|
||||
client := fake.NewSimpleClientset(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -245,7 +246,7 @@ func TestAutomountsAPIToken(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns with a token into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Namespace: ns,
|
||||
UID: types.UID(serviceAccountUID),
|
||||
|
@ -256,7 +257,7 @@ func TestAutomountsAPIToken(t *testing.T) {
|
|||
})
|
||||
// Add a token for the service account into the cache
|
||||
admit.secrets.Add(&api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: tokenName,
|
||||
Namespace: ns,
|
||||
Annotations: map[string]string{
|
||||
|
@ -345,7 +346,7 @@ func TestRespectsExistingMount(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns with a token into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: serviceAccountName,
|
||||
Namespace: ns,
|
||||
UID: types.UID(serviceAccountUID),
|
||||
|
@ -356,7 +357,7 @@ func TestRespectsExistingMount(t *testing.T) {
|
|||
})
|
||||
// Add a token for the service account into the cache
|
||||
admit.secrets.Add(&api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: tokenName,
|
||||
Namespace: ns,
|
||||
Annotations: map[string]string{
|
||||
|
@ -442,7 +443,7 @@ func TestAllowsReferencedSecret(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns with a secret reference into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -522,7 +523,7 @@ func TestRejectsUnreferencedSecretVolumes(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -599,7 +600,7 @@ func TestAllowUnreferencedSecretVolumesForPermissiveSAs(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
Annotations: map[string]string{EnforceMountableSecretsAnnotation: "true"},
|
||||
|
@ -630,7 +631,7 @@ func TestAllowsReferencedImagePullSecrets(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns with a secret reference into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -661,7 +662,7 @@ func TestRejectsUnreferencedImagePullSecrets(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -689,7 +690,7 @@ func TestDoNotAddImagePullSecrets(t *testing.T) {
|
|||
|
||||
// Add the default service account for the ns with a secret reference into the cache
|
||||
admit.serviceAccounts.Add(&api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
@ -724,7 +725,7 @@ func TestAddImagePullSecrets(t *testing.T) {
|
|||
admit.RequireAPIToken = false
|
||||
|
||||
sa := &api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceAccountName,
|
||||
Namespace: ns,
|
||||
},
|
||||
|
|
6 vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/default/BUILD generated vendored
@@ -13,9 +13,7 @@ go_library(
srcs = ["admission.go"],
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//pkg/client/cache:go_default_library",

@@ -23,8 +21,10 @@ go_library(
"//pkg/kubeapiserver/admission:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)

@@ -34,12 +34,12 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/admission:go_default_library",
"//pkg/api:go_default_library",
"//pkg/apis/storage:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/admission",
],
)
16 vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/default/admission.go generated vendored
@@ -23,11 +23,11 @@ import (
"github.com/golang/glog"

"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
admission "k8s.io/kubernetes/pkg/admission"
admission "k8s.io/apiserver/pkg/admission"
api "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/storage"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
"k8s.io/kubernetes/pkg/client/cache"

@@ -71,15 +71,11 @@ func (a *claimDefaulterPlugin) SetInternalClientSet(client internalclientset.Int
a.store = cache.NewStore(cache.MetaNamespaceKeyFunc)
a.reflector = cache.NewReflector(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return client.Storage().StorageClasses().List(internalOptions)
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
return client.Storage().StorageClasses().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return client.Storage().StorageClasses().Watch(internalOptions)
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
return client.Storage().StorageClasses().Watch(options)
},
},
&storage.StorageClass{},
18
vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/default/admission_test.go
generated
vendored
18
vendor/k8s.io/kubernetes/plugin/pkg/admission/storageclass/default/admission_test.go
generated
vendored
|
@ -22,7 +22,7 @@ import (
|
|||
"github.com/golang/glog"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/pkg/admission"
|
||||
"k8s.io/apiserver/pkg/admission"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/storage"
|
||||
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
|
||||
|
@ -33,7 +33,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default1",
|
||||
Annotations: map[string]string{
|
||||
storageutil.IsDefaultStorageClassAnnotation: "true",
|
||||
|
@ -45,7 +45,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "default2",
|
||||
Annotations: map[string]string{
|
||||
storageutil.IsDefaultStorageClassAnnotation: "true",
|
||||
|
@ -58,7 +58,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nondefault1",
|
||||
Annotations: map[string]string{
|
||||
storageutil.IsDefaultStorageClassAnnotation: "false",
|
||||
|
@ -71,7 +71,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nondefault2",
|
||||
},
|
||||
Provisioner: "nondefault1",
|
||||
|
@ -81,7 +81,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StorageClass",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "nondefault2",
|
||||
Annotations: map[string]string{
|
||||
storageutil.IsDefaultStorageClassAnnotation: "",
|
||||
|
@ -94,7 +94,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "claimWithClass",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
|
@ -106,7 +106,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "claimWithEmptyClass",
|
||||
Namespace: "ns",
|
||||
Annotations: map[string]string{
|
||||
|
@ -118,7 +118,7 @@ func TestAdmission(t *testing.T) {
|
|||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "PersistentVolumeClaim",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "claimWithNoClass",
|
||||
Namespace: "ns",
|
||||
},
|
||||
|
|
9 vendor/k8s.io/kubernetes/plugin/pkg/auth/OWNERS generated vendored
@@ -1,3 +1,6 @@
assignees:
- erictune
- liggitt
approvers:
- erictune
- liggitt
reviewers:
- erictune
- liggitt
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/keystone/BUILD generated vendored
@@ -15,12 +15,12 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/util/cert:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/rackspace/gophercloud",
"//vendor:github.com/rackspace/gophercloud/openstack",
"//vendor:k8s.io/apimachinery/pkg/util/net",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/client-go/util/cert",
],
)
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/password/keystone/keystone.go generated vendored
@@ -27,7 +27,7 @@ import (
"github.com/rackspace/gophercloud/openstack"
netutil "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apiserver/pkg/authentication/user"
certutil "k8s.io/kubernetes/pkg/util/cert"
certutil "k8s.io/client-go/util/cert"
)

// KeystoneAuthenticator contacts openstack keystone to validate user's credentials passed in the request.
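Note: keystone.go here, and oidc.go further down, only change where the certutil alias points — k8s.io/kubernetes/pkg/util/cert becomes k8s.io/client-go/util/cert — and keep the same helper calls. A small hedged sketch of loading a CA bundle through the relocated package (assuming certutil.NewPool is the helper in use; the file path is made up):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// Read PEM-encoded CA certificates into an x509.CertPool using the
	// relocated helper; "/etc/keystone/ca.crt" is only an example path.
	pool, err := certutil.NewPool("/etc/keystone/ca.crt")
	if err != nil {
		fmt.Println("loading CA file:", err)
		return
	}
	// The pool can then back TLS verification of the Keystone endpoint.
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{RootCAs: pool}},
	}
	_ = client
}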
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc/BUILD generated vendored
@@ -13,13 +13,13 @@ go_library(
srcs = ["oidc.go"],
tags = ["automanaged"],
deps = [
"//pkg/util/cert:go_default_library",
"//vendor:github.com/coreos/go-oidc/jose",
"//vendor:github.com/coreos/go-oidc/oidc",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/util/net",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/client-go/util/cert",
],
)
6 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc/OWNERS generated vendored
@@ -1,2 +1,4 @@
assignees:
- ericchiang
approvers:
- ericchiang
reviewers:
- ericchiang
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc/oidc.go generated vendored
@@ -42,7 +42,7 @@ import (
"k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apiserver/pkg/authentication/user"
certutil "k8s.io/kubernetes/pkg/util/cert"
certutil "k8s.io/client-go/util/cert"
)

type OIDCOptions struct {
4 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook/BUILD generated vendored
@@ -18,7 +18,7 @@ go_library(
"//vendor:k8s.io/apiserver/pkg/authentication/authenticator",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/apiserver/pkg/util/cache",
"//vendor:k8s.io/apiserver/pkg/webhook",
"//vendor:k8s.io/apiserver/pkg/util/webhook",
"//vendor:k8s.io/client-go/kubernetes/typed/authentication/v1beta1",
"//vendor:k8s.io/client-go/pkg/apis/authentication/install",
"//vendor:k8s.io/client-go/pkg/apis/authentication/v1beta1",

@@ -35,9 +35,9 @@ go_test(
tags = ["automanaged"],
deps = [
"//pkg/apis/authentication/v1beta1:go_default_library",
"//pkg/client/unversioned/clientcmd/api/v1:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/client-go/tools/clientcmd/api/v1",
],
)
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook/webhook.go generated vendored
@@ -27,7 +27,7 @@ import (
authenticationclient "k8s.io/client-go/kubernetes/typed/authentication/v1beta1"
authentication "k8s.io/client-go/pkg/apis/authentication/v1beta1"

"k8s.io/apiserver/pkg/webhook"
"k8s.io/apiserver/pkg/util/webhook"

_ "k8s.io/client-go/pkg/apis/authentication/install"
_ "k8s.io/kubernetes/pkg/apis/authentication/install"
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/webhook/webhook_test.go generated vendored
@@ -32,8 +32,8 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/authentication/user"
"k8s.io/client-go/tools/clientcmd/api/v1"
"k8s.io/kubernetes/pkg/apis/authentication/v1beta1"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"
)

// Service mocks a remote authentication service.
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/BUILD generated vendored
@@ -34,9 +34,9 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//pkg/registry/rbac/validation:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
"//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
],
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/BUILD generated vendored
@@ -16,9 +16,9 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/apis/rbac:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apiserver/pkg/authentication/user",
],
)
@ -21,7 +21,7 @@ import (
|
|||
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
rbac "k8s.io/kubernetes/pkg/apis/rbac"
|
||||
)
|
||||
|
||||
|
@ -59,7 +59,7 @@ func eventsRule() rbac.PolicyRule {
|
|||
|
||||
func init() {
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "attachdetach-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("persistentvolumes", "persistentvolumeclaims").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
|
@ -69,7 +69,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "cronjob-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "cronjob-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("cronjobs").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch", "create", "update", "delete").Groups(batchGroup).Resources("jobs").RuleOrDie(),
|
||||
|
@ -79,7 +79,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "daemon-set-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("daemonsets").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup).Resources("daemonsets/status").RuleOrDie(),
|
||||
|
@ -90,7 +90,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "deployment-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "deployment-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup).Resources("deployments").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup).Resources("deployments/status").RuleOrDie(),
|
||||
|
@ -102,7 +102,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "disruption-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "disruption-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("deployments").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
|
@ -114,7 +114,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "endpoint-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "endpoint-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services", "pods").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "create", "update", "delete").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
|
@ -123,7 +123,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "generic-garbage-collector"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// the GC controller needs to run list/watches, selective gets, and updates against any resource
|
||||
rbac.NewRule("get", "list", "watch", "patch", "update", "delete").Groups("*").Resources("*").RuleOrDie(),
|
||||
|
@ -131,7 +131,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "horizontal-pod-autoscaler"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(autoscalingGroup, extensionsGroup).Resources("horizontalpodautoscalers").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(autoscalingGroup, extensionsGroup).Resources("horizontalpodautoscalers/status").RuleOrDie(),
|
||||
|
@ -147,7 +147,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "job-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "job-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(batchGroup).Resources("jobs").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(batchGroup).Resources("jobs/status").RuleOrDie(),
|
||||
|
@ -156,7 +156,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "namespace-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "namespace-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "delete").Groups(legacyGroup).Resources("namespaces").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("namespaces/finalize", "namespaces/status").RuleOrDie(),
|
||||
|
@ -164,7 +164,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "node-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "node-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "update", "delete").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
|
@ -175,7 +175,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "persistent-volume-binder"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update", "create", "delete").Groups(legacyGroup).Resources("persistentvolumes").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("persistentvolumes/status").RuleOrDie(),
|
||||
|
@ -195,14 +195,14 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "pod-garbage-collector"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("list").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "replicaset-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replicaset-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(extensionsGroup).Resources("replicasets").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(extensionsGroup).Resources("replicasets/status").RuleOrDie(),
|
||||
|
@ -211,7 +211,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "replication-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "replication-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// 1.0 controllers needed get, update, so without these old controllers break on new servers
|
||||
rbac.NewRule("get", "list", "watch", "update").Groups(legacyGroup).Resources("replicationcontrollers").RuleOrDie(),
|
||||
|
@ -221,7 +221,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "resourcequota-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// quota can count quota on anything for reconcilation, so it needs full viewing powers
|
||||
rbac.NewRule("list", "watch").Groups("*").Resources("*").RuleOrDie(),
|
||||
|
@ -230,7 +230,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "route-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "route-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
|
@ -238,14 +238,14 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "service-account-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-account-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("serviceaccounts").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "service-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(legacyGroup).Resources("services").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(legacyGroup).Resources("services/status").RuleOrDie(),
|
||||
|
@ -254,7 +254,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "statefulset-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("pods").RuleOrDie(),
|
||||
rbac.NewRule("get", "list", "watch").Groups(appsGroup).Resources("statefulsets").RuleOrDie(),
|
||||
|
@ -265,7 +265,7 @@ func init() {
|
|||
},
|
||||
})
|
||||
addControllerRole(rbac.ClusterRole{
|
||||
ObjectMeta: api.ObjectMeta{Name: saRolePrefix + "certificate-controller"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "certificate-controller"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
rbac.NewRule("update").Groups(certificatesGroup).Resources("certificatesigningrequests/status", "certificatesigningrequests/approval").RuleOrDie(),
|
||||
63 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/policy.go generated vendored
@ -17,8 +17,8 @@ limitations under the License.
|
|||
package bootstrappolicy
|
||||
|
||||
import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apiserver/pkg/authentication/user"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
rbac "k8s.io/kubernetes/pkg/apis/rbac"
|
||||
)
|
||||
|
||||
|
@ -72,7 +72,7 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
roles := []rbac.ClusterRole{
|
||||
{
|
||||
// a "root" role which can do absolutely anything
|
||||
ObjectMeta: api.ObjectMeta{Name: "cluster-admin"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "cluster-admin"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("*").Groups("*").Resources("*").RuleOrDie(),
|
||||
rbac.NewRule("*").URLs("*").RuleOrDie(),
|
||||
|
@ -80,14 +80,14 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
},
|
||||
{
|
||||
// a role which provides just enough power to discovery API versions for negotiation
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:discovery"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:discovery"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get").URLs("/version", "/swaggerapi", "/swaggerapi/*", "/api", "/api/*", "/apis", "/apis/*").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role which provides minimal resource access to allow a "normal" user to learn information about themselves
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:basic-user"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:basic-user"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// TODO add future selfsubjectrulesreview, project request APIs, project listing APIs
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("selfsubjectaccessreviews").RuleOrDie(),
|
||||
|
@ -96,7 +96,7 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
|
||||
{
|
||||
// a role for a namespace level admin. It is `edit` plus the power to grant permissions to other users.
|
||||
ObjectMeta: api.ObjectMeta{Name: "admin"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "admin"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
|
@ -114,8 +114,8 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
|
||||
rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "horizontalpodautoscalers",
|
||||
"replicationcontrollers/scale", "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
|
||||
"horizontalpodautoscalers", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(),
|
||||
|
||||
// additional admin powers
|
||||
rbac.NewRule("create").Groups(authorizationGroup).Resources("localsubjectaccessreviews").RuleOrDie(),
|
||||
|
@ -126,7 +126,7 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
// a role for a namespace level editor. It grants access to all user level actions in a namespace.
|
||||
// It does not grant powers for "privileged" resources which are domain of the system: `/status`
|
||||
// subresources or `quota`/`limits` which are used to control namespaces
|
||||
ObjectMeta: api.ObjectMeta{Name: "edit"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "edit"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("pods", "pods/attach", "pods/proxy", "pods/exec", "pods/portforward").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(legacyGroup).Resources("replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
|
@ -144,14 +144,14 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
|
||||
rbac.NewRule(ReadWrite...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "horizontalpodautoscalers",
|
||||
"replicationcontrollers/scale", "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(),
|
||||
rbac.NewRule(ReadWrite...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
|
||||
"horizontalpodautoscalers", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role for namespace level viewing. It grants Read-only access to non-escalating resources in
|
||||
// a namespace.
|
||||
ObjectMeta: api.ObjectMeta{Name: "view"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "view"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("pods", "replicationcontrollers", "replicationcontrollers/scale", "serviceaccounts",
|
||||
"services", "endpoints", "persistentvolumeclaims", "configmaps").RuleOrDie(),
|
||||
|
@ -167,13 +167,20 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
|
||||
rbac.NewRule(Read...).Groups(batchGroup).Resources("jobs", "cronjobs", "scheduledjobs").RuleOrDie(),
|
||||
|
||||
rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "horizontalpodautoscalers",
|
||||
"replicationcontrollers/scale", "replicasets", "replicasets/scale", "deployments", "deployments/scale").RuleOrDie(),
|
||||
rbac.NewRule(Read...).Groups(extensionsGroup).Resources("daemonsets", "deployments", "deployments/scale",
|
||||
"horizontalpodautoscalers", "ingresses", "replicasets", "replicasets/scale", "replicationcontrollers/scale").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for heapster's connections back to the API server
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:heapster"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("events", "pods", "nodes", "namespaces").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role for nodes to use to have the access they need for running pods
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:node"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// Needed to check API access. These creates are non-mutating
|
||||
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
|
@ -212,9 +219,19 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
rbac.NewRule("get").Groups(legacyGroup).Resources("endpoints").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for node-problem-detector access. It does not get bound to default location since
|
||||
// deployment locations can reasonably vary.
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node-problem-detector"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
rbac.NewRule("patch").Groups(legacyGroup).Resources("nodes/status").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for setting up a proxy
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:node-proxier"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node-proxier"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// Used to build serviceLister
|
||||
rbac.NewRule("list", "watch").Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
|
@ -223,9 +240,19 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
eventsRule(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for bootstrapping a node's client certificates
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:node-bootstrapper"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// used to check if the node already exists
|
||||
rbac.NewRule("get").Groups(legacyGroup).Resources("nodes").RuleOrDie(),
|
||||
// used to create a certificatesigningrequest for a node-specific client certificate, and watch for it to be signed
|
||||
rbac.NewRule("create", "get", "list", "watch").Groups(certificatesGroup).Resources("certificatesigningrequests").RuleOrDie(),
|
||||
},
|
||||
},
|
||||
{
|
||||
// a role to use for allowing authentication and authorization delegation
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:auth-delegator"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:auth-delegator"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// These creates are non-mutating
|
||||
rbac.NewRule("create").Groups(authenticationGroup).Resources("tokenreviews").RuleOrDie(),
|
||||
|
@ -234,7 +261,7 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
},
|
||||
{
|
||||
// a role to use for the API registry, summarization, and proxy handling
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:kube-aggregator"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-aggregator"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
// it needs to see all services so that it knows whether the ones it points to exist or not
|
||||
rbac.NewRule(Read...).Groups(legacyGroup).Resources("services", "endpoints").RuleOrDie(),
|
||||
|
@ -243,7 +270,7 @@ func ClusterRoles() []rbac.ClusterRole {
|
|||
{
|
||||
// a role to use for bootstrapping the kube-controller-manager so it can create the shared informers
|
||||
// service accounts, and secrets that we need to create separate identities for other controllers
|
||||
ObjectMeta: api.ObjectMeta{Name: "system:kube-controller-manager"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "system:kube-controller-manager"},
|
||||
Rules: []rbac.PolicyRule{
|
||||
eventsRule(),
|
||||
rbac.NewRule("create").Groups(legacyGroup).Resources("endpoints", "secrets", "serviceaccounts").RuleOrDie(),
|
||||
|
|
|
@ -10,7 +10,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
- pods/attach
|
||||
|
@ -28,7 +27,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
|
@ -50,7 +48,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- bindings
|
||||
- events
|
||||
|
@ -67,7 +64,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
|
@ -76,14 +72,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- impersonate
|
||||
- apiGroups:
|
||||
- apps
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
|
@ -97,7 +91,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs:
|
||||
|
@ -111,7 +104,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- cronjobs
|
||||
- jobs
|
||||
|
@ -127,12 +119,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- daemonsets
|
||||
- deployments
|
||||
- deployments/scale
|
||||
- horizontalpodautoscalers
|
||||
- ingresses
|
||||
- replicasets
|
||||
- replicasets/scale
|
||||
- replicationcontrollers/scale
|
||||
|
@ -147,14 +139,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- localsubjectaccessreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- rbac.authorization.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- rolebindings
|
||||
- roles
|
||||
|
@ -177,13 +167,11 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- '*'
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
- '*'
|
||||
- attributeRestrictions: null
|
||||
nonResourceURLs:
|
||||
- nonResourceURLs:
|
||||
- '*'
|
||||
verbs:
|
||||
- '*'
|
||||
|
@ -197,7 +185,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
- pods/attach
|
||||
|
@ -215,7 +202,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
|
@ -237,7 +223,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- bindings
|
||||
- events
|
||||
|
@ -254,7 +239,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
|
@ -263,14 +247,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- impersonate
|
||||
- apiGroups:
|
||||
- apps
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
|
@ -284,7 +266,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs:
|
||||
|
@ -298,7 +279,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- cronjobs
|
||||
- jobs
|
||||
|
@ -314,12 +294,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- daemonsets
|
||||
- deployments
|
||||
- deployments/scale
|
||||
- horizontalpodautoscalers
|
||||
- ingresses
|
||||
- replicasets
|
||||
- replicasets/scale
|
||||
- replicationcontrollers/scale
|
||||
|
@ -342,14 +322,12 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- subjectaccessreviews
|
||||
verbs:
|
||||
|
@ -364,7 +342,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- selfsubjectaccessreviews
|
||||
verbs:
|
||||
|
@ -377,8 +354,7 @@ items:
|
|||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:discovery
|
||||
rules:
|
||||
- attributeRestrictions: null
|
||||
nonResourceURLs:
|
||||
- nonResourceURLs:
|
||||
- /api
|
||||
- /api/*
|
||||
- /apis
|
||||
|
@ -388,6 +364,25 @@ items:
|
|||
- /version
|
||||
verbs:
|
||||
- get
|
||||
- apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:heapster
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
- namespaces
|
||||
- nodes
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
|
@ -398,7 +393,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
|
@ -416,7 +410,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -425,7 +418,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
- secrets
|
||||
|
@ -434,14 +426,12 @@ items:
|
|||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- delete
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
- namespaces
|
||||
|
@ -450,7 +440,6 @@ items:
|
|||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
- serviceaccounts
|
||||
|
@ -458,7 +447,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- '*'
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- namespaces
|
||||
- nodes
|
||||
|
@ -473,7 +461,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- daemonsets
|
||||
- deployments
|
||||
|
@ -483,7 +470,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- cronjobs
|
||||
- jobs
|
||||
|
@ -500,14 +486,12 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- authentication.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- tokenreviews
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- authorization.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- localsubjectaccessreviews
|
||||
- subjectaccessreviews
|
||||
|
@ -515,7 +499,6 @@ items:
|
|||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
|
@ -524,7 +507,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -534,7 +516,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
|
@ -542,7 +523,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -551,7 +531,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -560,7 +539,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -569,14 +547,12 @@ items:
|
|||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- configmaps
|
||||
- secrets
|
||||
|
@ -584,7 +560,6 @@ items:
|
|||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
- persistentvolumes
|
||||
|
@ -592,11 +567,61 @@ items:
|
|||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:node-bootstrapper
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- certificates.k8s.io
|
||||
resources:
|
||||
- certificatesigningrequests
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:node-problem-detector
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
|
@ -607,7 +632,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
|
@ -616,14 +640,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -640,7 +662,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- configmaps
|
||||
- endpoints
|
||||
|
@ -656,7 +677,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- bindings
|
||||
- events
|
||||
|
@ -673,7 +693,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
|
@ -682,7 +701,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
|
@ -691,7 +709,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- autoscaling
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs:
|
||||
|
@ -700,7 +717,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- cronjobs
|
||||
- jobs
|
||||
|
@ -711,12 +727,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- daemonsets
|
||||
- deployments
|
||||
- deployments/scale
|
||||
- horizontalpodautoscalers
|
||||
- ingresses
|
||||
- replicasets
|
||||
- replicasets/scale
|
||||
- replicationcontrollers/scale
|
||||
|
|
|
@ -10,7 +10,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
- persistentvolumes
|
||||
|
@ -19,7 +18,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -28,7 +26,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
|
@ -36,7 +33,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -44,7 +40,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -61,7 +56,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- certificates.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- certificatesigningrequests
|
||||
verbs:
|
||||
|
@ -70,7 +64,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- certificates.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- certificatesigningrequests/approval
|
||||
- certificatesigningrequests/status
|
||||
|
@ -78,7 +71,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -95,7 +87,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- cronjobs
|
||||
verbs:
|
||||
|
@ -105,7 +96,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
|
@ -117,14 +107,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- cronjobs/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -132,7 +120,6 @@ items:
|
|||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -149,7 +136,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- daemonsets
|
||||
verbs:
|
||||
|
@ -158,14 +144,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- daemonsets/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -173,7 +157,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -183,14 +166,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods/binding
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -207,7 +188,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
|
@ -217,14 +197,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- deployments/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicasets
|
||||
verbs:
|
||||
|
@ -237,7 +215,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -247,7 +224,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -264,7 +240,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- deployments
|
||||
verbs:
|
||||
|
@ -273,7 +248,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicasets
|
||||
verbs:
|
||||
|
@ -282,7 +256,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicationcontrollers
|
||||
verbs:
|
||||
|
@ -291,7 +264,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- policy
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- poddisruptionbudgets
|
||||
verbs:
|
||||
|
@ -300,7 +272,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
|
@ -309,14 +280,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- policy
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- poddisruptionbudgets/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -333,7 +302,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
- services
|
||||
|
@ -343,7 +311,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
verbs:
|
||||
|
@ -354,14 +321,12 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints/restricted
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -378,7 +343,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- '*'
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
|
@ -390,7 +354,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -408,7 +371,6 @@ items:
|
|||
- apiGroups:
|
||||
- autoscaling
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- horizontalpodautoscalers
|
||||
verbs:
|
||||
|
@ -418,14 +380,12 @@ items:
|
|||
- apiGroups:
|
||||
- autoscaling
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- horizontalpodautoscalers/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicationcontrollers/scale
|
||||
verbs:
|
||||
|
@ -433,7 +393,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicationcontrollers/scale
|
||||
verbs:
|
||||
|
@ -441,7 +400,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- deployments/scale
|
||||
- replicasets/scale
|
||||
|
@ -450,14 +408,12 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resourceNames:
|
||||
- 'http:heapster:'
|
||||
- 'https:heapster:'
|
||||
|
@ -467,7 +423,6 @@ items:
|
|||
- proxy
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -484,7 +439,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- jobs
|
||||
verbs:
|
||||
|
@ -494,14 +448,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- batch
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- jobs/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -511,7 +463,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -528,7 +479,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
|
@ -538,7 +488,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- namespaces/finalize
|
||||
- namespaces/status
|
||||
|
@ -546,7 +495,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- '*'
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
|
@ -564,7 +512,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -574,21 +521,18 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -596,7 +540,6 @@ items:
|
|||
- list
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -613,7 +556,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumes
|
||||
verbs:
|
||||
|
@ -625,14 +567,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumes/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
|
@ -642,14 +582,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumeclaims/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -660,7 +598,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- storage.k8s.io
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- storageclasses
|
||||
verbs:
|
||||
|
@ -669,7 +606,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
|
@ -679,21 +615,18 @@ items:
|
|||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- secrets
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -710,7 +643,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -719,7 +651,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -734,7 +665,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicasets
|
||||
verbs:
|
||||
|
@ -744,14 +674,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicasets/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -762,7 +690,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -779,7 +706,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicationcontrollers
|
||||
verbs:
|
||||
|
@ -789,14 +715,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- replicationcontrollers/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -807,7 +731,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -824,7 +747,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- '*'
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- '*'
|
||||
verbs:
|
||||
|
@ -832,14 +754,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- resourcequotas/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -856,7 +776,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -864,14 +783,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -888,14 +805,12 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- create
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -912,7 +827,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- services
|
||||
verbs:
|
||||
|
@ -921,14 +835,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- services/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
|
@ -936,7 +848,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
|
@ -953,7 +864,6 @@ items:
|
|||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -961,7 +871,6 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- statefulsets
|
||||
verbs:
|
||||
|
@ -970,14 +879,12 @@ items:
|
|||
- watch
|
||||
- apiGroups:
|
||||
- apps
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- statefulsets/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
|
@ -987,7 +894,6 @@ items:
|
|||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- persistentvolumeclaims
|
||||
verbs:
|
||||
|
@ -995,7 +901,6 @@ items:
|
|||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
attributeRestrictions: null
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
10 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/rbac/rbac_test.go generated vendored
@ -21,9 +21,9 @@ import (
	"strings"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/rbac"
	rbacregistryvalidation "k8s.io/kubernetes/pkg/registry/rbac/validation"
)
@ -38,11 +38,11 @@ func newRule(verbs, apiGroups, resources, nonResourceURLs string) rbac.PolicyRul
}

func newRole(name, namespace string, rules ...rbac.PolicyRule) *rbac.Role {
	return &rbac.Role{ObjectMeta: api.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules}
	return &rbac.Role{ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: name}, Rules: rules}
}

func newClusterRole(name string, rules ...rbac.PolicyRule) *rbac.ClusterRole {
	return &rbac.ClusterRole{ObjectMeta: api.ObjectMeta{Name: name}, Rules: rules}
	return &rbac.ClusterRole{ObjectMeta: metav1.ObjectMeta{Name: name}, Rules: rules}
}

const (
@ -52,7 +52,7 @@ const (

func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRoleBinding {
	r := &rbac.ClusterRoleBinding{
		ObjectMeta: api.ObjectMeta{},
		ObjectMeta: metav1.ObjectMeta{},
		RoleRef: rbac.RoleRef{
			APIGroup: rbac.GroupName,
			Kind:     "ClusterRole", // ClusterRoleBindings can only refer to ClusterRole
@ -69,7 +69,7 @@ func newClusterRoleBinding(roleName string, subjects ...string) *rbac.ClusterRol
}

func newRoleBinding(namespace, roleName string, bindType uint16, subjects ...string) *rbac.RoleBinding {
	r := &rbac.RoleBinding{ObjectMeta: api.ObjectMeta{Namespace: namespace}}
	r := &rbac.RoleBinding{ObjectMeta: metav1.ObjectMeta{Namespace: namespace}}

	switch bindType {
	case bindToRole:
4 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook/BUILD generated vendored
@ -18,7 +18,7 @@ go_library(
        "//vendor:k8s.io/apimachinery/pkg/runtime/schema",
        "//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
        "//vendor:k8s.io/apiserver/pkg/util/cache",
        "//vendor:k8s.io/apiserver/pkg/webhook",
        "//vendor:k8s.io/apiserver/pkg/util/webhook",
        "//vendor:k8s.io/client-go/kubernetes/typed/authorization/v1beta1",
        "//vendor:k8s.io/client-go/pkg/apis/authorization/install",
        "//vendor:k8s.io/client-go/pkg/apis/authorization/v1beta1",
@ -35,11 +35,11 @@ go_test(
    tags = ["automanaged"],
    deps = [
        "//pkg/apis/authorization/v1beta1:go_default_library",
        "//pkg/client/unversioned/clientcmd/api/v1:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/util/diff",
        "//vendor:k8s.io/apiserver/pkg/authentication/user",
        "//vendor:k8s.io/apiserver/pkg/authorization/authorizer",
        "//vendor:k8s.io/client-go/tools/clientcmd/api/v1",
    ],
)
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook/webhook.go generated vendored
@ -29,7 +29,7 @@ import (
	authorizationclient "k8s.io/client-go/kubernetes/typed/authorization/v1beta1"
	authorization "k8s.io/client-go/pkg/apis/authorization/v1beta1"

	"k8s.io/apiserver/pkg/webhook"
	"k8s.io/apiserver/pkg/util/webhook"

	_ "k8s.io/client-go/pkg/apis/authorization/install"
	_ "k8s.io/kubernetes/pkg/apis/authorization/install"
2 vendor/k8s.io/kubernetes/plugin/pkg/auth/authorizer/webhook/webhook_test.go generated vendored
@ -36,8 +36,8 @@ import (
	"k8s.io/apimachinery/pkg/util/diff"
	"k8s.io/apiserver/pkg/authentication/user"
	"k8s.io/apiserver/pkg/authorization/authorizer"
	"k8s.io/client-go/tools/clientcmd/api/v1"
	"k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1"
)

func TestNewFromConfig(t *testing.T) {
35 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/BUILD generated vendored
@ -1,35 +0,0 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
)

go_library(
    name = "go_default_library",
    srcs = ["plugins.go"],
    tags = ["automanaged"],
    deps = [
        "//plugin/pkg/client/auth/gcp:go_default_library",
        "//plugin/pkg/client/auth/oidc:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//plugin/pkg/client/auth/gcp:all-srcs",
        "//plugin/pkg/client/auth/oidc:all-srcs",
    ],
    tags = ["automanaged"],
)
3 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/OWNERS generated vendored
@ -1,3 +0,0 @@
assignees:
- cjcullen
- jlowdermilk
274 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp.go generated vendored
@ -1,274 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gcp
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
"golang.org/x/oauth2"
|
||||
"golang.org/x/oauth2/google"
|
||||
"k8s.io/apimachinery/pkg/util/yaml"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/util/jsonpath"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if err := restclient.RegisterAuthProviderPlugin("gcp", newGCPAuthProvider); err != nil {
|
||||
glog.Fatalf("Failed to register gcp auth plugin: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// gcpAuthProvider is an auth provider plugin that uses GCP credentials to provide
|
||||
// tokens for kubectl to authenticate itself to the apiserver. A sample json config
|
||||
// is provided below with all recognized options described.
|
||||
//
|
||||
// {
|
||||
// 'auth-provider': {
|
||||
// # Required
|
||||
// "name": "gcp",
|
||||
//
|
||||
// 'config': {
|
||||
// # Caching options
|
||||
//
|
||||
// # Raw string data representing cached access token.
|
||||
// "access-token": "ya29.CjWdA4GiBPTt",
|
||||
// # RFC3339Nano expiration timestamp for cached access token.
|
||||
// "expiry": "2016-10-31 22:31:9.123",
|
||||
//
|
||||
// # Command execution options
|
||||
// # These options direct the plugin to execute a specified command and parse
|
||||
// # token and expiry time from the output of the command.
|
||||
//
|
||||
// # Command to execute for access token. String is split on whitespace
|
||||
// # with first field treated as the executable, remaining fields as args.
|
||||
// # Command output will be parsed as JSON.
|
||||
// "cmd-path": "/usr/bin/gcloud config config-helper --output=json",
|
||||
//
|
||||
// # JSONPath to the string field that represents the access token in
|
||||
// # command output. If omitted, defaults to "{.access_token}".
|
||||
// "token-key": "{.credential.access_token}",
|
||||
//
|
||||
// # JSONPath to the string field that represents expiration timestamp
|
||||
// # of the access token in the command output. If omitted, defaults to
|
||||
// # "{.token_expiry}"
|
||||
// "expiry-key": ""{.credential.token_expiry}",
|
||||
//
|
||||
// # golang reference time in the format that the expiration timestamp uses.
|
||||
// # If omitted, defaults to time.RFC3339Nano
|
||||
// "time-fmt": "2006-01-02 15:04:05.999999999"
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//
|
||||
type gcpAuthProvider struct {
|
||||
tokenSource oauth2.TokenSource
|
||||
persister restclient.AuthProviderConfigPersister
|
||||
}
|
||||
|
||||
func newGCPAuthProvider(_ string, gcpConfig map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
|
||||
cmd, useCmd := gcpConfig["cmd-path"]
|
||||
var ts oauth2.TokenSource
|
||||
var err error
|
||||
if useCmd {
|
||||
ts, err = newCmdTokenSource(cmd, gcpConfig["token-key"], gcpConfig["expiry-key"], gcpConfig["time-fmt"])
|
||||
} else {
|
||||
ts, err = google.DefaultTokenSource(context.Background(), "https://www.googleapis.com/auth/cloud-platform")
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cts, err := newCachedTokenSource(gcpConfig["access-token"], gcpConfig["expiry"], persister, ts, gcpConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &gcpAuthProvider{cts, persister}, nil
|
||||
}
|
||||
|
||||
func (g *gcpAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
|
||||
return &oauth2.Transport{
|
||||
Source: g.tokenSource,
|
||||
Base: rt,
|
||||
}
|
||||
}
|
||||
|
||||
func (g *gcpAuthProvider) Login() error { return nil }
|
||||
|
||||
type cachedTokenSource struct {
|
||||
lk sync.Mutex
|
||||
source oauth2.TokenSource
|
||||
accessToken string
|
||||
expiry time.Time
|
||||
persister restclient.AuthProviderConfigPersister
|
||||
cache map[string]string
|
||||
}
|
||||
|
||||
func newCachedTokenSource(accessToken, expiry string, persister restclient.AuthProviderConfigPersister, ts oauth2.TokenSource, cache map[string]string) (*cachedTokenSource, error) {
|
||||
var expiryTime time.Time
|
||||
if parsedTime, err := time.Parse(time.RFC3339Nano, expiry); err == nil {
|
||||
expiryTime = parsedTime
|
||||
}
|
||||
if cache == nil {
|
||||
cache = make(map[string]string)
|
||||
}
|
||||
return &cachedTokenSource{
|
||||
source: ts,
|
||||
accessToken: accessToken,
|
||||
expiry: expiryTime,
|
||||
persister: persister,
|
||||
cache: cache,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (t *cachedTokenSource) Token() (*oauth2.Token, error) {
|
||||
tok := t.cachedToken()
|
||||
if tok.Valid() && !tok.Expiry.IsZero() {
|
||||
return tok, nil
|
||||
}
|
||||
tok, err := t.source.Token()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cache := t.update(tok)
|
||||
if t.persister != nil {
|
||||
if err := t.persister.Persist(cache); err != nil {
|
||||
glog.V(4).Infof("Failed to persist token: %v", err)
|
||||
}
|
||||
}
|
||||
return tok, nil
|
||||
}
|
||||
|
||||
func (t *cachedTokenSource) cachedToken() *oauth2.Token {
|
||||
t.lk.Lock()
|
||||
defer t.lk.Unlock()
|
||||
return &oauth2.Token{
|
||||
AccessToken: t.accessToken,
|
||||
TokenType: "Bearer",
|
||||
Expiry: t.expiry,
|
||||
}
|
||||
}
|
||||
|
||||
func (t *cachedTokenSource) update(tok *oauth2.Token) map[string]string {
|
||||
t.lk.Lock()
|
||||
defer t.lk.Unlock()
|
||||
t.accessToken = tok.AccessToken
|
||||
t.expiry = tok.Expiry
|
||||
ret := map[string]string{}
|
||||
for k, v := range t.cache {
|
||||
ret[k] = v
|
||||
}
|
||||
ret["access-token"] = t.accessToken
|
||||
ret["expiry"] = t.expiry.Format(time.RFC3339Nano)
|
||||
return ret
|
||||
}
|
||||
|
||||
type commandTokenSource struct {
|
||||
cmd string
|
||||
args []string
|
||||
tokenKey string
|
||||
expiryKey string
|
||||
timeFmt string
|
||||
}
|
||||
|
||||
func newCmdTokenSource(cmd, tokenKey, expiryKey, timeFmt string) (*commandTokenSource, error) {
|
||||
if len(timeFmt) == 0 {
|
||||
timeFmt = time.RFC3339Nano
|
||||
}
|
||||
if len(tokenKey) == 0 {
|
||||
tokenKey = "{.access_token}"
|
||||
}
|
||||
if len(expiryKey) == 0 {
|
||||
expiryKey = "{.token_expiry}"
|
||||
}
|
||||
fields := strings.Fields(cmd)
|
||||
if len(fields) == 0 {
|
||||
return nil, fmt.Errorf("missing access token cmd")
|
||||
}
|
||||
return &commandTokenSource{
|
||||
cmd: fields[0],
|
||||
args: fields[1:],
|
||||
tokenKey: tokenKey,
|
||||
expiryKey: expiryKey,
|
||||
timeFmt: timeFmt,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *commandTokenSource) Token() (*oauth2.Token, error) {
|
||||
fullCmd := fmt.Sprintf("%s %s", c.cmd, strings.Join(c.args, " "))
|
||||
cmd := exec.Command(c.cmd, c.args...)
|
||||
output, err := cmd.Output()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error executing access token command %q: %v", fullCmd, err)
|
||||
}
|
||||
token, err := c.parseTokenCmdOutput(output)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing output for access token command %q: %v", fullCmd, err)
|
||||
}
|
||||
return token, nil
|
||||
}
|
||||
|
||||
func (c *commandTokenSource) parseTokenCmdOutput(output []byte) (*oauth2.Token, error) {
|
||||
output, err := yaml.ToJSON(output)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var data interface{}
|
||||
if err := json.Unmarshal(output, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
accessToken, err := parseJSONPath(data, "token-key", c.tokenKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing token-key %q: %v", c.tokenKey, err)
|
||||
}
|
||||
expiryStr, err := parseJSONPath(data, "expiry-key", c.expiryKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing expiry-key %q: %v", c.expiryKey, err)
|
||||
}
|
||||
var expiry time.Time
|
||||
if t, err := time.Parse(c.timeFmt, expiryStr); err != nil {
|
||||
glog.V(4).Infof("Failed to parse token expiry from %s (fmt=%s): %v", expiryStr, c.timeFmt, err)
|
||||
} else {
|
||||
expiry = t
|
||||
}
|
||||
|
||||
return &oauth2.Token{
|
||||
AccessToken: accessToken,
|
||||
TokenType: "Bearer",
|
||||
Expiry: expiry,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func parseJSONPath(input interface{}, name, template string) (string, error) {
|
||||
j := jsonpath.New(name)
|
||||
buf := new(bytes.Buffer)
|
||||
if err := j.Parse(template); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if err := j.Execute(buf, input); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
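For readers following the removal: parseJSONPath above resolves the configurable token-key / expiry-key templates against the helper command's JSON output using k8s.io/kubernetes/pkg/util/jsonpath. A minimal, self-contained sketch of that lookup (not part of this commit; names and values below are made up for illustration):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"

	"k8s.io/kubernetes/pkg/util/jsonpath"
)

// extractField mirrors parseJSONPath: evaluate a JSONPath template against
// already-decoded JSON and return the matched text.
func extractField(data interface{}, template string) (string, error) {
	j := jsonpath.New("example")
	if err := j.Parse(template); err != nil {
		return "", err
	}
	buf := new(bytes.Buffer)
	if err := j.Execute(buf, data); err != nil {
		return "", err
	}
	return buf.String(), nil
}

func main() {
	// Output shaped like `gcloud config config-helper --output=json`,
	// as described in the removed comment block (token value is fake).
	raw := []byte(`{"credential": {"access_token": "ya29.fake", "token_expiry": "2016-10-31T22:31:09.123Z"}}`)
	var data interface{}
	if err := json.Unmarshal(raw, &data); err != nil {
		panic(err)
	}
	tok, err := extractField(data, "{.credential.access_token}")
	if err != nil {
		panic(err)
	}
	fmt.Println(tok) // prints ya29.fake
}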
211 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/gcp/gcp_test.go generated vendored
|
@ -1,211 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gcp
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
func TestCmdTokenSource(t *testing.T) {
|
||||
fakeExpiry := time.Date(2016, 10, 31, 22, 31, 9, 123000000, time.UTC)
|
||||
customFmt := "2006-01-02 15:04:05.999999999"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
output []byte
|
||||
cmd, tokenKey, expiryKey, timeFmt string
|
||||
tok *oauth2.Token
|
||||
expectErr error
|
||||
}{
|
||||
{
|
||||
"defaults",
|
||||
[]byte(`{
|
||||
"access_token": "faketoken",
|
||||
"token_expiry": "2016-10-31T22:31:09.123000000Z"
|
||||
}`),
|
||||
"/fake/cmd/path", "", "", "",
|
||||
&oauth2.Token{
|
||||
AccessToken: "faketoken",
|
||||
TokenType: "Bearer",
|
||||
Expiry: fakeExpiry,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"custom keys",
|
||||
[]byte(`{
|
||||
"token": "faketoken",
|
||||
"token_expiry": {
|
||||
"datetime": "2016-10-31 22:31:09.123"
|
||||
}
|
||||
}`),
|
||||
"/fake/cmd/path", "{.token}", "{.token_expiry.datetime}", customFmt,
|
||||
&oauth2.Token{
|
||||
AccessToken: "faketoken",
|
||||
TokenType: "Bearer",
|
||||
Expiry: fakeExpiry,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"missing cmd",
|
||||
nil,
|
||||
"", "", "", "",
|
||||
nil,
|
||||
fmt.Errorf("missing access token cmd"),
|
||||
},
|
||||
{
|
||||
"missing token-key",
|
||||
[]byte(`{
|
||||
"broken": "faketoken",
|
||||
"token_expiry": {
|
||||
"datetime": "2016-10-31 22:31:09.123000000Z"
|
||||
}
|
||||
}`),
|
||||
"/fake/cmd/path", "{.token}", "", "",
|
||||
nil,
|
||||
fmt.Errorf("error parsing token-key %q", "{.token}"),
|
||||
},
|
||||
{
|
||||
"missing expiry-key",
|
||||
[]byte(`{
|
||||
"access_token": "faketoken",
|
||||
"expires": "2016-10-31T22:31:09.123000000Z"
|
||||
}`),
|
||||
"/fake/cmd/path", "", "{.expiry}", "",
|
||||
nil,
|
||||
fmt.Errorf("error parsing expiry-key %q", "{.expiry}"),
|
||||
},
|
||||
{
|
||||
"invalid expiry timestamp",
|
||||
[]byte(`{
|
||||
"access_token": "faketoken",
|
||||
"token_expiry": "sometime soon, idk"
|
||||
}`),
|
||||
"/fake/cmd/path", "", "", "",
|
||||
&oauth2.Token{
|
||||
AccessToken: "faketoken",
|
||||
TokenType: "Bearer",
|
||||
Expiry: time.Time{},
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
"bad JSON",
|
||||
[]byte(`{
|
||||
"access_token": "faketoken",
|
||||
"token_expiry": "sometime soon, idk"
|
||||
------
|
||||
`),
|
||||
"/fake/cmd", "", "", "",
|
||||
nil,
|
||||
fmt.Errorf("invalid character '-' after object key:value pair"),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
ts, err := newCmdTokenSource(tc.cmd, tc.tokenKey, tc.expiryKey, tc.timeFmt)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), tc.expectErr.Error()) {
|
||||
t.Errorf("%s newCmdTokenSource error: %v, want %v", tc.name, err, tc.expectErr)
|
||||
}
|
||||
continue
|
||||
}
|
||||
tok, err := ts.parseTokenCmdOutput(tc.output)
|
||||
|
||||
if err != tc.expectErr && !strings.Contains(err.Error(), tc.expectErr.Error()) {
|
||||
t.Errorf("%s parseCmdTokenSource error: %v, want %v", tc.name, err, tc.expectErr)
|
||||
}
|
||||
if !reflect.DeepEqual(tok, tc.tok) {
|
||||
t.Errorf("%s got token %v, want %v", tc.name, tok, tc.tok)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type fakePersister struct {
|
||||
lk sync.Mutex
|
||||
cache map[string]string
|
||||
}
|
||||
|
||||
func (f *fakePersister) Persist(cache map[string]string) error {
|
||||
f.lk.Lock()
|
||||
defer f.lk.Unlock()
|
||||
f.cache = map[string]string{}
|
||||
for k, v := range cache {
|
||||
f.cache[k] = v
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *fakePersister) read() map[string]string {
|
||||
ret := map[string]string{}
|
||||
f.lk.Lock()
|
||||
for k, v := range f.cache {
|
||||
ret[k] = v
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type fakeTokenSource struct {
|
||||
token *oauth2.Token
|
||||
err error
|
||||
}
|
||||
|
||||
func (f *fakeTokenSource) Token() (*oauth2.Token, error) {
|
||||
return f.token, f.err
|
||||
}
|
||||
|
||||
func TestCachedTokenSource(t *testing.T) {
|
||||
tok := &oauth2.Token{AccessToken: "fakeaccesstoken"}
|
||||
persister := &fakePersister{}
|
||||
source := &fakeTokenSource{
|
||||
token: tok,
|
||||
err: nil,
|
||||
}
|
||||
cache := map[string]string{
|
||||
"foo": "bar",
|
||||
"baz": "bazinga",
|
||||
}
|
||||
ts, err := newCachedTokenSource("fakeaccesstoken", "", persister, source, cache)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(10)
|
||||
for i := 0; i < 10; i++ {
|
||||
go func() {
|
||||
_, err := ts.Token()
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error: %s", err)
|
||||
}
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
cache["access-token"] = "fakeaccesstoken"
|
||||
cache["expiry"] = tok.Expiry.Format(time.RFC3339Nano)
|
||||
if got := persister.read(); !reflect.DeepEqual(got, cache) {
|
||||
t.Errorf("got cache %v, want %v", got, cache)
|
||||
}
|
||||
}
|
48 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/BUILD generated vendored
|
@ -1,48 +0,0 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["oidc.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/client/restclient:go_default_library",
|
||||
"//vendor:github.com/coreos/go-oidc/jose",
|
||||
"//vendor:github.com/coreos/go-oidc/oauth2",
|
||||
"//vendor:github.com/coreos/go-oidc/oidc",
|
||||
"//vendor:github.com/golang/glog",
|
||||
],
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["oidc_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//plugin/pkg/auth/authenticator/token/oidc/testing:go_default_library",
|
||||
"//vendor:github.com/coreos/go-oidc/jose",
|
||||
"//vendor:github.com/coreos/go-oidc/key",
|
||||
"//vendor:github.com/coreos/go-oidc/oauth2",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
2 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/OWNERS generated vendored
|
@ -1,2 +0,0 @@
|
|||
assignees:
|
||||
- ericchiang
|
333 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc.go generated vendored
|
@ -1,333 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package oidc
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/oauth2"
|
||||
"github.com/coreos/go-oidc/oidc"
|
||||
"github.com/golang/glog"
|
||||
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
)
|
||||
|
||||
const (
|
||||
cfgIssuerUrl = "idp-issuer-url"
|
||||
cfgClientID = "client-id"
|
||||
cfgClientSecret = "client-secret"
|
||||
cfgCertificateAuthority = "idp-certificate-authority"
|
||||
cfgCertificateAuthorityData = "idp-certificate-authority-data"
|
||||
cfgExtraScopes = "extra-scopes"
|
||||
cfgIDToken = "id-token"
|
||||
cfgRefreshToken = "refresh-token"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if err := restclient.RegisterAuthProviderPlugin("oidc", newOIDCAuthProvider); err != nil {
|
||||
glog.Fatalf("Failed to register oidc auth plugin: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// expiryDelta determines how much earlier a token should be considered
|
||||
// expired than its actual expiration time. It is used to avoid late
|
||||
// expirations due to client-server time mismatches.
|
||||
//
|
||||
// NOTE(ericchiang): this is taken from golang.org/x/oauth2
|
||||
const expiryDelta = 10 * time.Second
|
||||
|
||||
var cache = newClientCache()
|
||||
|
||||
// Like TLS transports, keep a cache of OIDC clients indexed by issuer URL.
|
||||
type clientCache struct {
|
||||
mu sync.RWMutex
|
||||
cache map[cacheKey]*oidcAuthProvider
|
||||
}
|
||||
|
||||
func newClientCache() *clientCache {
|
||||
return &clientCache{cache: make(map[cacheKey]*oidcAuthProvider)}
|
||||
}
|
||||
|
||||
type cacheKey struct {
|
||||
// Canonical issuer URL string of the provider.
|
||||
issuerURL string
|
||||
|
||||
clientID string
|
||||
clientSecret string
|
||||
|
||||
// Don't use CA as cache key because we only add a cache entry if we can connect
|
||||
// to the issuer in the first place. A valid CA is a prerequisite.
|
||||
}
|
||||
|
||||
func (c *clientCache) getClient(issuer, clientID, clientSecret string) (*oidcAuthProvider, bool) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
client, ok := c.cache[cacheKey{issuer, clientID, clientSecret}]
|
||||
return client, ok
|
||||
}
|
||||
|
||||
// setClient attempts to put the client in the cache but returns the client
|
||||
// already stored under the same key, if any. This is so there's only ever one client per provider.
|
||||
func (c *clientCache) setClient(issuer, clientID, clientSecret string, client *oidcAuthProvider) *oidcAuthProvider {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
key := cacheKey{issuer, clientID, clientSecret}
|
||||
|
||||
// If another client has already initialized a client for the given provider we want
|
||||
// to use that client instead of the one we're trying to set. This is so all transports
|
||||
// share a client and can coordinate around the same mutex when refreshing and writing
|
||||
// to the kubeconfig.
|
||||
if oldClient, ok := c.cache[key]; ok {
|
||||
return oldClient
|
||||
}
|
||||
|
||||
c.cache[key] = client
|
||||
return client
|
||||
}
|
||||
|
||||
func newOIDCAuthProvider(_ string, cfg map[string]string, persister restclient.AuthProviderConfigPersister) (restclient.AuthProvider, error) {
|
||||
issuer := cfg[cfgIssuerUrl]
|
||||
if issuer == "" {
|
||||
return nil, fmt.Errorf("Must provide %s", cfgIssuerUrl)
|
||||
}
|
||||
|
||||
clientID := cfg[cfgClientID]
|
||||
if clientID == "" {
|
||||
return nil, fmt.Errorf("Must provide %s", cfgClientID)
|
||||
}
|
||||
|
||||
clientSecret := cfg[cfgClientSecret]
|
||||
if clientSecret == "" {
|
||||
return nil, fmt.Errorf("Must provide %s", cfgClientSecret)
|
||||
}
|
||||
|
||||
// Check cache for existing provider.
|
||||
if provider, ok := cache.getClient(issuer, clientID, clientSecret); ok {
|
||||
return provider, nil
|
||||
}
|
||||
|
||||
var certAuthData []byte
|
||||
var err error
|
||||
if cfg[cfgCertificateAuthorityData] != "" {
|
||||
certAuthData, err = base64.StdEncoding.DecodeString(cfg[cfgCertificateAuthorityData])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
clientConfig := restclient.Config{
|
||||
TLSClientConfig: restclient.TLSClientConfig{
|
||||
CAFile: cfg[cfgCertificateAuthority],
|
||||
CAData: certAuthData,
|
||||
},
|
||||
}
|
||||
|
||||
trans, err := restclient.TransportFor(&clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hc := &http.Client{Transport: trans}
|
||||
|
||||
providerCfg, err := oidc.FetchProviderConfig(hc, issuer)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error fetching provider config: %v", err)
|
||||
}
|
||||
|
||||
scopes := strings.Split(cfg[cfgExtraScopes], ",")
|
||||
oidcCfg := oidc.ClientConfig{
|
||||
HTTPClient: hc,
|
||||
Credentials: oidc.ClientCredentials{
|
||||
ID: clientID,
|
||||
Secret: clientSecret,
|
||||
},
|
||||
ProviderConfig: providerCfg,
|
||||
Scope: append(scopes, oidc.DefaultScope...),
|
||||
}
|
||||
client, err := oidc.NewClient(oidcCfg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error creating OIDC Client: %v", err)
|
||||
}
|
||||
|
||||
provider := &oidcAuthProvider{
|
||||
client: &oidcClient{client},
|
||||
cfg: cfg,
|
||||
persister: persister,
|
||||
now: time.Now,
|
||||
}
|
||||
|
||||
return cache.setClient(issuer, clientID, clientSecret, provider), nil
|
||||
}
|
||||
|
||||
type oidcAuthProvider struct {
|
||||
// Interface rather than a raw *oidc.Client for testing.
|
||||
client OIDCClient
|
||||
|
||||
// Stubbed out for testing.
|
||||
now func() time.Time
|
||||
|
||||
// Mutex guards persisting to the kubeconfig file and allows synchronized
|
||||
// updates to the in-memory config. It also ensures concurrent calls to
|
||||
// the RoundTripper only trigger a single refresh request.
|
||||
mu sync.Mutex
|
||||
cfg map[string]string
|
||||
persister restclient.AuthProviderConfigPersister
|
||||
}
|
||||
|
||||
func (p *oidcAuthProvider) WrapTransport(rt http.RoundTripper) http.RoundTripper {
|
||||
return &roundTripper{
|
||||
wrapped: rt,
|
||||
provider: p,
|
||||
}
|
||||
}
|
||||
|
||||
func (p *oidcAuthProvider) Login() error {
|
||||
return errors.New("not yet implemented")
|
||||
}
|
||||
|
||||
type OIDCClient interface {
|
||||
refreshToken(rt string) (oauth2.TokenResponse, error)
|
||||
verifyJWT(jwt *jose.JWT) error
|
||||
}
|
||||
|
||||
type roundTripper struct {
|
||||
provider *oidcAuthProvider
|
||||
wrapped http.RoundTripper
|
||||
}
|
||||
|
||||
func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
token, err := r.provider.idToken()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// shallow copy of the struct
|
||||
r2 := new(http.Request)
|
||||
*r2 = *req
|
||||
// deep copy of the Header so we don't modify the original
|
||||
// request's Header (as per RoundTripper contract).
|
||||
r2.Header = make(http.Header)
|
||||
for k, s := range req.Header {
|
||||
r2.Header[k] = s
|
||||
}
|
||||
r2.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
|
||||
|
||||
return r.wrapped.RoundTrip(r2)
|
||||
}
|
||||
|
||||
func (p *oidcAuthProvider) idToken() (string, error) {
|
||||
p.mu.Lock()
|
||||
defer p.mu.Unlock()
|
||||
|
||||
if idToken, ok := p.cfg[cfgIDToken]; ok && len(idToken) > 0 {
|
||||
valid, err := verifyJWTExpiry(p.now(), idToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if valid {
|
||||
// If the cached id token is still valid use it.
|
||||
return idToken, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Try to request a new token using the refresh token.
|
||||
rt, ok := p.cfg[cfgRefreshToken]
|
||||
if !ok || len(rt) == 0 {
|
||||
return "", errors.New("No valid id-token, and cannot refresh without refresh-token")
|
||||
}
|
||||
|
||||
tokens, err := p.client.refreshToken(rt)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("could not refresh token: %v", err)
|
||||
}
|
||||
jwt, err := jose.ParseJWT(tokens.IDToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
if err := p.client.verifyJWT(&jwt); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Create a new config to persist.
|
||||
newCfg := make(map[string]string)
|
||||
for key, val := range p.cfg {
|
||||
newCfg[key] = val
|
||||
}
|
||||
|
||||
if tokens.RefreshToken != "" && tokens.RefreshToken != rt {
|
||||
newCfg[cfgRefreshToken] = tokens.RefreshToken
|
||||
}
|
||||
|
||||
newCfg[cfgIDToken] = tokens.IDToken
|
||||
if err = p.persister.Persist(newCfg); err != nil {
|
||||
return "", fmt.Errorf("could not perist new tokens: %v", err)
|
||||
}
|
||||
|
||||
// Update the in memory config to reflect the on disk one.
|
||||
p.cfg = newCfg
|
||||
|
||||
return tokens.IDToken, nil
|
||||
}
|
||||
|
||||
// oidcClient is the real implementation of the OIDCClient interface; the
|
||||
// interface exists so it can be stubbed out in tests.
|
||||
type oidcClient struct {
|
||||
client *oidc.Client
|
||||
}
|
||||
|
||||
func (o *oidcClient) refreshToken(rt string) (oauth2.TokenResponse, error) {
|
||||
oac, err := o.client.OAuthClient()
|
||||
if err != nil {
|
||||
return oauth2.TokenResponse{}, err
|
||||
}
|
||||
|
||||
return oac.RequestToken(oauth2.GrantTypeRefreshToken, rt)
|
||||
}
|
||||
|
||||
func (o *oidcClient) verifyJWT(jwt *jose.JWT) error {
|
||||
return o.client.VerifyJWT(*jwt)
|
||||
}
|
||||
|
||||
func verifyJWTExpiry(now time.Time, s string) (valid bool, err error) {
|
||||
jwt, err := jose.ParseJWT(s)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("invalid %q", cfgIDToken)
|
||||
}
|
||||
claims, err := jwt.Claims()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
exp, ok, err := claims.TimeClaim("exp")
|
||||
switch {
|
||||
case err != nil:
|
||||
return false, fmt.Errorf("failed to parse 'exp' claim: %v", err)
|
||||
case !ok:
|
||||
return false, errors.New("missing required 'exp' claim")
|
||||
case exp.After(now.Add(expiryDelta)):
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
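Both removed providers hook in through WrapTransport: they wrap the client's http.RoundTripper and attach a bearer token to a copy of each request (oauth2.Transport in gcp.go, the roundTripper type above in oidc.go). A stripped-down, stdlib-only sketch of that pattern, with illustrative names that are not from this commit:

package main

import (
	"fmt"
	"net/http"
)

// bearerRoundTripper mirrors the wrapping done by the removed plugins: copy
// the request, set the Authorization header on the copy, and delegate to the
// wrapped transport so the caller's request is never mutated.
type bearerRoundTripper struct {
	token   func() (string, error) // e.g. a cached or refreshing token source
	wrapped http.RoundTripper
}

func (b *bearerRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	tok, err := b.token()
	if err != nil {
		return nil, err
	}
	r2 := new(http.Request)
	*r2 = *req
	r2.Header = make(http.Header, len(req.Header))
	for k, v := range req.Header {
		r2.Header[k] = v
	}
	r2.Header.Set("Authorization", fmt.Sprintf("Bearer %s", tok))
	return b.wrapped.RoundTrip(r2)
}

func main() {
	client := &http.Client{Transport: &bearerRoundTripper{
		token:   func() (string, error) { return "fake-token", nil },
		wrapped: http.DefaultTransport,
	}}
	_ = client // every request sent through client now carries the bearer token
}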
384 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/oidc/oidc_test.go generated vendored
|
@ -1,384 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package oidc
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/go-oidc/jose"
|
||||
"github.com/coreos/go-oidc/key"
|
||||
"github.com/coreos/go-oidc/oauth2"
|
||||
|
||||
oidctesting "k8s.io/kubernetes/plugin/pkg/auth/authenticator/token/oidc/testing"
|
||||
)
|
||||
|
||||
func clearCache() {
|
||||
cache = newClientCache()
|
||||
}
|
||||
|
||||
type persister struct{}
|
||||
|
||||
// we don't need to actually persist anything because there's no way for us to
|
||||
// read from a persister.
|
||||
func (p *persister) Persist(map[string]string) error { return nil }
|
||||
|
||||
type noRefreshOIDCClient struct{}
|
||||
|
||||
func (c *noRefreshOIDCClient) refreshToken(rt string) (oauth2.TokenResponse, error) {
|
||||
return oauth2.TokenResponse{}, errors.New("alwaysErrOIDCClient: cannot refresh token")
|
||||
}
|
||||
|
||||
func (c *noRefreshOIDCClient) verifyJWT(jwt *jose.JWT) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
type mockOIDCClient struct {
|
||||
tokenResponse oauth2.TokenResponse
|
||||
}
|
||||
|
||||
func (c *mockOIDCClient) refreshToken(rt string) (oauth2.TokenResponse, error) {
|
||||
return c.tokenResponse, nil
|
||||
}
|
||||
|
||||
func (c *mockOIDCClient) verifyJWT(jwt *jose.JWT) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func TestNewOIDCAuthProvider(t *testing.T) {
|
||||
tempDir, err := ioutil.TempDir(os.TempDir(), "oidc_test")
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot make temp dir %v", err)
|
||||
}
|
||||
cert := path.Join(tempDir, "oidc-cert")
|
||||
key := path.Join(tempDir, "oidc-key")
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
oidctesting.GenerateSelfSignedCert(t, "127.0.0.1", cert, key)
|
||||
op := oidctesting.NewOIDCProvider(t, "")
|
||||
srv, err := op.ServeTLSWithKeyPair(cert, key)
|
||||
if err != nil {
|
||||
t.Fatalf("Cannot start server %v", err)
|
||||
}
|
||||
defer srv.Close()
|
||||
|
||||
certData, err := ioutil.ReadFile(cert)
|
||||
if err != nil {
|
||||
t.Fatalf("Could not read cert bytes %v", err)
|
||||
}
|
||||
|
||||
makeToken := func(exp time.Time) *jose.JWT {
|
||||
jwt, err := jose.NewSignedJWT(jose.Claims(map[string]interface{}{
|
||||
"exp": exp.UTC().Unix(),
|
||||
}), op.PrivKey.Signer())
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create signed JWT %v", err)
|
||||
}
|
||||
return jwt
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
|
||||
goodToken := makeToken(t0.Add(time.Hour)).Encode()
|
||||
expiredToken := makeToken(t0.Add(-time.Hour)).Encode()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
|
||||
cfg map[string]string
|
||||
wantInitErr bool
|
||||
|
||||
client OIDCClient
|
||||
wantCfg map[string]string
|
||||
wantTokenErr bool
|
||||
}{
|
||||
{
|
||||
// A Valid configuration
|
||||
name: "no id token and no refresh token",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
},
|
||||
wantTokenErr: true,
|
||||
},
|
||||
{
|
||||
name: "valid config with an initial token",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgIDToken: goodToken,
|
||||
},
|
||||
client: new(noRefreshOIDCClient),
|
||||
wantCfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgIDToken: goodToken,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid ID token with a refresh token",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgRefreshToken: "foo",
|
||||
cfgIDToken: expiredToken,
|
||||
},
|
||||
client: &mockOIDCClient{
|
||||
tokenResponse: oauth2.TokenResponse{
|
||||
IDToken: goodToken,
|
||||
},
|
||||
},
|
||||
wantCfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgRefreshToken: "foo",
|
||||
cfgIDToken: goodToken,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "invalid ID token with a refresh token, server returns new refresh token",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgRefreshToken: "foo",
|
||||
cfgIDToken: expiredToken,
|
||||
},
|
||||
client: &mockOIDCClient{
|
||||
tokenResponse: oauth2.TokenResponse{
|
||||
IDToken: goodToken,
|
||||
RefreshToken: "bar",
|
||||
},
|
||||
},
|
||||
wantCfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgRefreshToken: "bar",
|
||||
cfgIDToken: goodToken,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "expired token and no refresh otken",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
cfgIDToken: expiredToken,
|
||||
},
|
||||
wantTokenErr: true,
|
||||
},
|
||||
{
|
||||
name: "valid base64d ca",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthorityData: base64.StdEncoding.EncodeToString(certData),
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "client-secret",
|
||||
},
|
||||
client: new(noRefreshOIDCClient),
|
||||
wantTokenErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing client ID",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientSecret: "client-secret",
|
||||
},
|
||||
wantInitErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing client secret",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
},
|
||||
wantInitErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing issuer URL",
|
||||
cfg: map[string]string{
|
||||
cfgCertificateAuthority: cert,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "secret",
|
||||
},
|
||||
wantInitErr: true,
|
||||
},
|
||||
{
|
||||
name: "missing TLS config",
|
||||
cfg: map[string]string{
|
||||
cfgIssuerUrl: srv.URL,
|
||||
cfgClientID: "client-id",
|
||||
cfgClientSecret: "secret",
|
||||
},
|
||||
wantInitErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
clearCache()
|
||||
|
||||
p, err := newOIDCAuthProvider("cluster.example.com", tt.cfg, new(persister))
|
||||
if tt.wantInitErr {
|
||||
if err == nil {
|
||||
t.Errorf("%s: want non-nil err", tt.name)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("%s: unexpected error on newOIDCAuthProvider: %v", tt.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
provider := p.(*oidcAuthProvider)
|
||||
provider.client = tt.client
|
||||
provider.now = func() time.Time { return t0 }
|
||||
|
||||
if _, err := provider.idToken(); err != nil {
|
||||
if !tt.wantTokenErr {
|
||||
t.Errorf("%s: failed to get id token: %v", tt.name, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if tt.wantTokenErr {
|
||||
t.Errorf("%s: expected to not get id token: %v", tt.name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(tt.wantCfg, provider.cfg) {
|
||||
t.Errorf("%s: expected config %#v got %#v", tt.name, tt.wantCfg, provider.cfg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestVerifyJWTExpiry(t *testing.T) {
|
||||
privKey, err := key.GeneratePrivateKey()
|
||||
if err != nil {
|
||||
t.Fatalf("can't generate private key: %v", err)
|
||||
}
|
||||
makeToken := func(s string, exp time.Time, count int) *jose.JWT {
|
||||
jwt, err := jose.NewSignedJWT(jose.Claims(map[string]interface{}{
|
||||
"test": s,
|
||||
"exp": exp.UTC().Unix(),
|
||||
"count": count,
|
||||
}), privKey.Signer())
|
||||
if err != nil {
|
||||
t.Fatalf("Could not create signed JWT %v", err)
|
||||
}
|
||||
return jwt
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
jwt *jose.JWT
|
||||
now time.Time
|
||||
wantErr bool
|
||||
wantExpired bool
|
||||
}{
|
||||
{
|
||||
name: "valid jwt",
|
||||
jwt: makeToken("foo", t0.Add(time.Hour), 1),
|
||||
now: t0,
|
||||
},
|
||||
{
|
||||
name: "invalid jwt",
|
||||
jwt: &jose.JWT{},
|
||||
now: t0,
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "expired jwt",
|
||||
jwt: makeToken("foo", t0.Add(-time.Hour), 1),
|
||||
now: t0,
|
||||
wantExpired: true,
|
||||
},
|
||||
{
|
||||
name: "jwt expires soon enough to be marked expired",
|
||||
jwt: makeToken("foo", t0, 1),
|
||||
now: t0,
|
||||
wantExpired: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
func() {
|
||||
valid, err := verifyJWTExpiry(tc.now, tc.jwt.Encode())
|
||||
if err != nil {
|
||||
if !tc.wantErr {
|
||||
t.Errorf("%s: %v", tc.name, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if tc.wantErr {
|
||||
t.Errorf("%s: expected error", tc.name)
|
||||
return
|
||||
}
|
||||
|
||||
if valid && tc.wantExpired {
|
||||
t.Errorf("%s: expected token to be expired", tc.name)
|
||||
}
|
||||
if !valid && !tc.wantExpired {
|
||||
t.Errorf("%s: expected token to be valid", tc.name)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
func TestClientCache(t *testing.T) {
|
||||
cache := newClientCache()
|
||||
|
||||
if _, ok := cache.getClient("issuer1", "id1", "secret1"); ok {
|
||||
t.Fatalf("got client before putting one in the cache")
|
||||
}
|
||||
|
||||
cli1 := new(oidcAuthProvider)
|
||||
cli2 := new(oidcAuthProvider)
|
||||
|
||||
gotcli := cache.setClient("issuer1", "id1", "secret1", cli1)
|
||||
if cli1 != gotcli {
|
||||
t.Fatalf("set first client and got a different one")
|
||||
}
|
||||
|
||||
gotcli = cache.setClient("issuer1", "id1", "secret1", cli2)
|
||||
if cli1 != gotcli {
|
||||
t.Fatalf("set a second client and didn't get the first")
|
||||
}
|
||||
}
|
23 vendor/k8s.io/kubernetes/plugin/pkg/client/auth/plugins.go generated vendored
|
@ -1,23 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package auth
|
||||
|
||||
import (
|
||||
// Initialize all known client auth plugins.
|
||||
_ "k8s.io/kubernetes/plugin/pkg/client/auth/gcp"
|
||||
_ "k8s.io/kubernetes/plugin/pkg/client/auth/oidc"
|
||||
)
|
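plugins.go above is imported only for its side effects: pulling it in runs each plugin's init(), which calls restclient.RegisterAuthProviderPlugin for "gcp" and "oidc" so a kubeconfig auth-provider entry with that name can be resolved. A minimal sketch of how a binary opts in (illustrative, not from this commit):

package main

import (
	// Blank import for side effects only: registers the "gcp" and "oidc"
	// auth provider plugins with the restclient registry.
	_ "k8s.io/kubernetes/plugin/pkg/client/auth"
)

func main() {
	// Nothing else is needed for registration; client construction elsewhere
	// looks the provider up by the name given in the kubeconfig.
}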
9 vendor/k8s.io/kubernetes/plugin/pkg/scheduler/BUILD generated vendored
|
@ -19,8 +19,9 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/cache:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/record:go_default_library",
|
||||
"//pkg/client/restclient:go_default_library",
|
||||
"//pkg/util:go_default_library",
|
||||
"//pkg/util/hash:go_default_library",
|
||||
"//pkg/util/workqueue:go_default_library",
|
||||
|
@ -29,11 +30,15 @@ go_library(
|
|||
"//plugin/pkg/scheduler/api:go_default_library",
|
||||
"//plugin/pkg/scheduler/metrics:go_default_library",
|
||||
"//plugin/pkg/scheduler/schedulercache:go_default_library",
|
||||
"//plugin/pkg/scheduler/util:go_default_library",
|
||||
"//vendor:github.com/golang/glog",
|
||||
"//vendor:github.com/golang/groupcache/lru",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/net",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||
"//vendor:k8s.io/client-go/rest",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -60,6 +65,7 @@ go_test(
|
|||
"//plugin/pkg/scheduler/api:go_default_library",
|
||||
"//plugin/pkg/scheduler/schedulercache:go_default_library",
|
||||
"//plugin/pkg/scheduler/testing:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/labels",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/diff",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
|
@ -85,6 +91,7 @@ filegroup(
|
|||
"//plugin/pkg/scheduler/metrics:all-srcs",
|
||||
"//plugin/pkg/scheduler/schedulercache:all-srcs",
|
||||
"//plugin/pkg/scheduler/testing:all-srcs",
|
||||
"//plugin/pkg/scheduler/util:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
|
15 vendor/k8s.io/kubernetes/plugin/pkg/scheduler/OWNERS generated vendored
|
@ -1,9 +1,8 @@
|
|||
assignees:
|
||||
- davidopp
|
||||
- timothysc
|
||||
- wojtek-t
|
||||
approvers:
|
||||
- davidopp
|
||||
- timothysc
|
||||
- wojtek-t
|
||||
reviewers:
|
||||
- davidopp
|
||||
- timothysc
|
||||
- wojtek-t
|
||||
- jayunit100
|
||||
- davidopp
|
||||
- timothysc
|
||||
- wojtek-t
|
||||
|
|
2 vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/BUILD generated vendored
|
@ -19,7 +19,7 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/client/cache:go_default_library",
|
||||
"//pkg/client/legacylisters:go_default_library",
|
||||
"//pkg/kubelet/qos:go_default_library",
|
||||
"//pkg/util/workqueue:go_default_library",
|
||||
"//plugin/pkg/scheduler/algorithm:go_default_library",
|
||||
|
|
27 vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates.go generated vendored
|
@ -29,7 +29,7 @@ import (
|
|||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
"k8s.io/kubernetes/pkg/client/legacylisters"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos"
|
||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
|
@ -64,7 +64,7 @@ type PersistentVolumeClaimInfo interface {
|
|||
|
||||
// CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo
|
||||
type CachedPersistentVolumeClaimInfo struct {
|
||||
*cache.StoreToPersistentVolumeClaimLister
|
||||
*listers.StoreToPersistentVolumeClaimLister
|
||||
}
|
||||
|
||||
// GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name
|
||||
|
@ -73,12 +73,12 @@ func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace
|
|||
}
|
||||
|
||||
type CachedNodeInfo struct {
|
||||
*cache.StoreToNodeLister
|
||||
*listers.StoreToNodeLister
|
||||
}
|
||||
|
||||
// GetNodeInfo returns cached data for the node 'id'.
|
||||
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
|
||||
node, exists, err := c.Get(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: id}})
|
||||
node, exists, err := c.Get(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: id}})
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
|
||||
|
@ -463,11 +463,7 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
|
|||
result.NvidiaGPU += rQuantity.Value()
|
||||
default:
|
||||
if v1.IsOpaqueIntResourceName(rName) {
|
||||
// Lazily allocate this map only if required.
|
||||
if result.OpaqueIntResources == nil {
|
||||
result.OpaqueIntResources = map[v1.ResourceName]int64{}
|
||||
}
|
||||
result.OpaqueIntResources[rName] += rQuantity.Value()
|
||||
result.AddOpaque(rName, rQuantity.Value())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -490,11 +486,9 @@ func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
|
|||
}
|
||||
default:
|
||||
if v1.IsOpaqueIntResourceName(rName) {
|
||||
// Lazily allocate this map only if required.
|
||||
if result.OpaqueIntResources == nil {
|
||||
result.OpaqueIntResources = map[v1.ResourceName]int64{}
|
||||
}
|
||||
value := rQuantity.Value()
|
||||
// Ensure the opaque resource map is initialized in the result.
|
||||
result.AddOpaque(rName, int64(0))
|
||||
if value > result.OpaqueIntResources[rName] {
|
||||
result.OpaqueIntResources[rName] = value
|
||||
}
|
||||
|
@ -1170,11 +1164,6 @@ func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint)
|
|||
return true
|
||||
}
|
||||
|
||||
// The taint list isn't nil/empty, a nil/empty toleration list can't tolerate them.
|
||||
if len(tolerations) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
for i := range taints {
|
||||
taint := &taints[i]
|
||||
// skip taints that have effect PreferNoSchedule, since it is for priorities
|
||||
|
@ -1182,7 +1171,7 @@ func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint)
|
|||
continue
|
||||
}
|
||||
|
||||
if !v1.TaintToleratedByTolerations(taint, tolerations) {
|
||||
if len(tolerations) == 0 || !v1.TaintToleratedByTolerations(taint, tolerations) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
|
385 vendor/k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go generated vendored
|
@ -418,7 +418,7 @@ func TestPodFitsHost(t *testing.T) {
|
|||
},
|
||||
},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
|
@ -432,7 +432,7 @@ func TestPodFitsHost(t *testing.T) {
|
|||
},
|
||||
},
|
||||
node: &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
|
@ -1184,7 +1184,7 @@ func TestPodFitsSelector(t *testing.T) {
|
|||
// TODO: Uncomment this test when RequiredDuringSchedulingRequiredDuringExecution is implemented
|
||||
// {
|
||||
// pod: &v1.Pod{
|
||||
// ObjectMeta: v1.ObjectMeta{
|
||||
// ObjectMeta: metav1.ObjectMeta{
|
||||
// Annotations: map[string]string{
|
||||
// v1.AffinityAnnotationKey: `
|
||||
// {"nodeAffinity": {
|
||||
|
@ -1283,7 +1283,7 @@ func TestPodFitsSelector(t *testing.T) {
|
|||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}
|
||||
|
||||
for _, test := range tests {
|
||||
node := v1.Node{ObjectMeta: v1.ObjectMeta{Labels: test.labels}}
|
||||
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: test.labels}}
|
||||
nodeInfo := schedulercache.NewNodeInfo()
|
||||
nodeInfo.SetNode(&node)
|
||||
|
||||
|
@ -1349,7 +1349,7 @@ func TestNodeLabelPresence(t *testing.T) {
|
|||
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated}
|
||||
|
||||
for _, test := range tests {
|
||||
node := v1.Node{ObjectMeta: v1.ObjectMeta{Labels: label}}
|
||||
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
|
||||
nodeInfo := schedulercache.NewNodeInfo()
|
||||
nodeInfo.SetNode(&node)
|
||||
|
||||
|
@ -1385,11 +1385,11 @@ func TestServiceAffinity(t *testing.T) {
|
|||
"region": "r2",
|
||||
"zone": "z22",
|
||||
}
|
||||
node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labels1}}
|
||||
node2 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labels2}}
|
||||
node3 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labels3}}
|
||||
node4 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labels4}}
|
||||
node5 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labels4}}
|
||||
node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labels1}}
|
||||
node2 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labels2}}
|
||||
node3 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labels3}}
|
||||
node4 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine4", Labels: labels4}}
|
||||
node5 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine5", Labels: labels4}}
|
||||
tests := []struct {
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
|
@ -1421,8 +1421,8 @@ func TestServiceAffinity(t *testing.T) {
|
|||
test: "pod with region label mismatch",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: selector}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
fits: true,
|
||||
|
@ -1430,8 +1430,8 @@ func TestServiceAffinity(t *testing.T) {
|
|||
test: "service pod on same node",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: selector}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
fits: true,
|
||||
|
@ -1439,8 +1439,8 @@ func TestServiceAffinity(t *testing.T) {
|
|||
test: "service pod on different node, region match",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: selector}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
fits: false,
|
||||
|
@ -1448,35 +1448,35 @@ func TestServiceAffinity(t *testing.T) {
|
|||
test: "service pod on different node, region mismatch",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: v1.ObjectMeta{Namespace: "ns2"}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns2"}}},
|
||||
fits: true,
|
||||
labels: []string{"region"},
|
||||
test: "service in different namespace, region mismatch",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: selector, Namespace: "ns2"}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
|
||||
fits: true,
|
||||
labels: []string{"region"},
|
||||
test: "pod in different namespace, region mismatch",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: metav1.ObjectMeta{Labels: selector, Namespace: "ns1"}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
|
||||
fits: false,
|
||||
labels: []string{"region"},
|
||||
test: "service and pod in same namespace, region mismatch",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: selector}}},
|
||||
node: &node1,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
fits: false,
|
||||
|
@ -1484,8 +1484,8 @@ func TestServiceAffinity(t *testing.T) {
|
|||
test: "service pod on different node, multiple labels, not all match",
|
||||
},
|
||||
{
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}},
|
||||
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: selector}},
|
||||
pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: metav1.ObjectMeta{Labels: selector}}},
|
||||
node: &node4,
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}},
|
||||
fits: true,
|
||||
|
@ -1749,7 +1749,7 @@ func TestEBSVolumeCountConflicts(t *testing.T) {
|
|||
|
||||
pvInfo := FakePersistentVolumeInfo{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "someEBSVol"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someEBSVol"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{
|
||||
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ebsVol"},
|
||||
|
@ -1757,7 +1757,7 @@ func TestEBSVolumeCountConflicts(t *testing.T) {
|
|||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "someNonEBSVol"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someNonEBSVol"},
|
||||
Spec: v1.PersistentVolumeSpec{
|
||||
PersistentVolumeSource: v1.PersistentVolumeSource{},
|
||||
},
|
||||
|
@ -1766,15 +1766,15 @@ func TestEBSVolumeCountConflicts(t *testing.T) {
|
|||
|
||||
pvcInfo := FakePersistentVolumeClaimInfo{
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "someEBSVol"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someEBSVol"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someEBSVol"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "someNonEBSVol"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "someNonEBSVol"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someNonEBSVol"},
|
||||
},
|
||||
{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "pvcWithDeletedPV"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "pvcWithDeletedPV"},
|
||||
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"},
|
||||
},
|
||||
}
|
||||
|
@ -1841,7 +1841,7 @@ func TestRunGeneralPredicates(t *testing.T) {
|
|||
nodeInfo: schedulercache.NewNodeInfo(
|
||||
newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})),
|
||||
node: &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
|
||||
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)},
|
||||
},
|
||||
fits: true,
|
||||
|
@ -1853,7 +1853,7 @@ func TestRunGeneralPredicates(t *testing.T) {
|
|||
nodeInfo: schedulercache.NewNodeInfo(
|
||||
newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})),
|
||||
node: &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
|
||||
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)},
|
||||
},
|
||||
fits: false,
|
||||
|
@ -1900,7 +1900,7 @@ func TestRunGeneralPredicates(t *testing.T) {
|
|||
},
|
||||
nodeInfo: schedulercache.NewNodeInfo(),
|
||||
node: &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
|
||||
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)},
|
||||
},
|
||||
fits: false,
|
||||
|
@ -1912,7 +1912,7 @@ func TestRunGeneralPredicates(t *testing.T) {
|
|||
pod: newPodWithPort(123),
|
||||
nodeInfo: schedulercache.NewNodeInfo(newPodWithPort(123)),
|
||||
node: &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine1"},
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "machine1"},
|
||||
Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)},
|
||||
},
|
||||
fits: false,
|
||||
|
@@ -1943,7 +1943,7 @@ func TestInterPodAffinity(t *testing.T) {
"zone": "z11",
}
podLabel2 := map[string]string{"security": "S1"}
- node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labels1}}
+ node1 := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labels1}}
tests := []struct {
pod *v1.Pod
pods []*v1.Pod

@@ -1959,7 +1959,7 @@ func TestInterPodAffinity(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -1983,14 +1983,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: true,
test: "satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2014,14 +2014,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: true,
test: "satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2045,14 +2045,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel, Namespace: "ns"}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel, Namespace: "ns"}}},
node: &node1,
fits: false,
test: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel,
},
Spec: v1.PodSpec{

@@ -2075,14 +2075,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: false,
test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2123,14 +2123,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: true,
test: "satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution ",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2171,14 +2171,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: false,
test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2218,7 +2218,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: true,
test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod",
@@ -2226,7 +2226,7 @@ func TestInterPodAffinity(t *testing.T) {
// TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution.
//{
// pod: &v1.Pod{
- // ObjectMeta: v1.ObjectMeta{
+ // ObjectMeta: metav1.ObjectMeta{
// Labels: podLabel2,
// Annotations: map[string]string{
// v1.AffinityAnnotationKey: `

@@ -2262,14 +2262,14 @@ func TestInterPodAffinity(t *testing.T) {
// },
// },
// },
- // pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podlabel}}},
+ // pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podlabel}}},
// node: &node1,
// fits: true,
// test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ",
//},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2332,7 +2332,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- ObjectMeta: v1.ObjectMeta{Labels: podLabel},
+ ObjectMeta: metav1.ObjectMeta{Labels: podLabel},
},
},
node: &node1,

@@ -2341,7 +2341,7 @@ func TestInterPodAffinity(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel2,
},
Spec: v1.PodSpec{

@@ -2381,14 +2381,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: false,
test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel,
},
Spec: v1.PodSpec{

@@ -2451,7 +2451,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- ObjectMeta: v1.ObjectMeta{Labels: podLabel},
+ ObjectMeta: metav1.ObjectMeta{Labels: podLabel},
},
},
node: &node1,

@@ -2460,7 +2460,7 @@ func TestInterPodAffinity(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel,
},
Spec: v1.PodSpec{

@@ -2484,14 +2484,14 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}},
+ pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabel}}},
node: &node1,
fits: false,
test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels",
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel,
},
},

@@ -2517,7 +2517,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- ObjectMeta: v1.ObjectMeta{Labels: podLabel},
+ ObjectMeta: metav1.ObjectMeta{Labels: podLabel},
},
},
node: &node1,

@@ -2526,7 +2526,7 @@ func TestInterPodAffinity(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: podLabel,
},
},

@@ -2552,7 +2552,7 @@ func TestInterPodAffinity(t *testing.T) {
},
},
},
- ObjectMeta: v1.ObjectMeta{Labels: podLabel},
+ ObjectMeta: metav1.ObjectMeta{Labels: podLabel},
},
},
node: &node1,
@@ -2642,12 +2642,12 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
},
pods: []*v1.Pod{
- {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelA}},
+ {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelA}},
},
nodes: []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
- {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
- {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
},
fits: map[string]bool{
"machine1": true,

@@ -2695,12 +2695,12 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
},
pods: []*v1.Pod{
- {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
- {Spec: v1.PodSpec{NodeName: "nodeB"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "def"}}},
+ {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
+ {Spec: v1.PodSpec{NodeName: "nodeB"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "def"}}},
},
nodes: []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
},
fits: map[string]bool{
"nodeA": false,

@@ -2710,7 +2710,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},

@@ -2738,8 +2738,8 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
pods: []*v1.Pod{},
nodes: []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
},
fits: map[string]bool{
"nodeA": true,

@@ -2772,11 +2772,11 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
},
pods: []*v1.Pod{
- {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
+ {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
},
nodes: []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
},
fits: map[string]bool{
"nodeA": false,

@@ -2808,12 +2808,12 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
},
pods: []*v1.Pod{
- {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
+ {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
},
nodes: []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
},
fits: map[string]bool{
"nodeA": false,

@@ -2824,7 +2824,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "123"}},
+ ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "123"}},
Spec: v1.PodSpec{
Affinity: &v1.Affinity{
PodAntiAffinity: &v1.PodAntiAffinity{

@@ -2847,7 +2847,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
},
pods: []*v1.Pod{
- {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}},
+ {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar"}}},
{
Spec: v1.PodSpec{
NodeName: "nodeC",

@@ -2873,10 +2873,10 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
},
},
nodes: []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
- {ObjectMeta: v1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
+ {ObjectMeta: metav1.ObjectMeta{Name: "nodeD", Labels: labelRgUS}},
},
fits: map[string]bool{
"nodeA": false,
@@ -2954,12 +2954,12 @@ func TestPodToleratesTaints(t *testing.T) {
}{
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod0",
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -2975,7 +2975,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod1",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -2991,7 +2991,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -3007,7 +3007,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -3024,7 +3024,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -3040,7 +3040,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -3056,7 +3056,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -3072,7 +3072,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -3093,7 +3093,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -3113,7 +3113,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -3130,7 +3130,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -3147,7 +3147,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -3163,7 +3163,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{

@@ -3180,7 +3180,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
{
pod: &v1.Pod{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Name: "pod2",
Annotations: map[string]string{
v1.TolerationsAnnotationKey: `

@@ -3197,7 +3197,7 @@ func TestPodToleratesTaints(t *testing.T) {
},
},
node: v1.Node{
- ObjectMeta: v1.ObjectMeta{
+ ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
v1.TaintsAnnotationKey: `
[{
@@ -3212,6 +3212,31 @@ func TestPodToleratesTaints(t *testing.T) {
test: "The pod has a toleration that key and value don't match the taint on the node, " +
"but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
},
+ {
+ pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "pod2",
+ },
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{{Image: "pod2:V1"}},
+ },
+ },
+ node: v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ v1.TaintsAnnotationKey: `
+ [{
+ "key": "dedicated",
+ "value": "user1",
+ "effect": "PreferNoSchedule"
+ }]`,
+ },
+ },
+ },
+ fits: true,
+ test: "The pod has no toleration, " +
+ "but the effect of taint on node is PreferNochedule. Pod can be scheduled onto the node",
+ },
}
expectedFailureReasons := []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}

@@ -3412,3 +3437,149 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) {
}
}
}
+
+ func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: pod, Namespace: "default"},
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
+ {
+ Name: pv,
+ VolumeSource: v1.VolumeSource{
+ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+ ClaimName: pvc,
+ },
+ },
+ },
+ },
+ },
+ }
+ }
+
+ func TestVolumeZonePredicate(t *testing.T) {
+ pvInfo := FakePersistentVolumeInfo{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{metav1.LabelZoneFailureDomain: "zone_1"}},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "Vol_2", Labels: map[string]string{metav1.LabelZoneRegion: "zone_2", "uselessLabel": "none"}},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "Vol_3", Labels: map[string]string{metav1.LabelZoneRegion: "zone_3"}},
+ },
+ }
+
+ pvcInfo := FakePersistentVolumeClaimInfo{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
+ Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "PVC_2", Namespace: "default"},
+ Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_2"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "PVC_3", Namespace: "default"},
+ Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_3"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "PVC_4", Namespace: "default"},
+ Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_not_exist"},
+ },
+ }
+
+ tests := []struct {
+ Name string
+ Pod *v1.Pod
+ Fits bool
+ Node *v1.Node
+ }{
+ {
+ Name: "pod without volume",
+ Pod: &v1.Pod{
+ ObjectMeta: metav1.ObjectMeta{Name: "pod_1", Namespace: "default"},
+ },
+ Node: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "host1",
+ Labels: map[string]string{metav1.LabelZoneFailureDomain: "zone_1"},
+ },
+ },
+ Fits: true,
+ },
+ {
+ Name: "node without labels",
+ Pod: createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+ Node: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "host1",
+ },
+ },
+ Fits: true,
+ },
+ {
+ Name: "label zone failure domain matched",
+ Pod: createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+ Node: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "host1",
+ Labels: map[string]string{metav1.LabelZoneFailureDomain: "zone_1", "uselessLabel": "none"},
+ },
+ },
+ Fits: true,
+ },
+ {
+ Name: "label zone region matched",
+ Pod: createPodWithVolume("pod_1", "vol_1", "PVC_2"),
+ Node: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "host1",
+ Labels: map[string]string{metav1.LabelZoneRegion: "zone_2", "uselessLabel": "none"},
+ },
+ },
+ Fits: true,
+ },
+ {
+ Name: "label zone region failed match",
+ Pod: createPodWithVolume("pod_1", "vol_1", "PVC_2"),
+ Node: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "host1",
+ Labels: map[string]string{metav1.LabelZoneRegion: "no_zone_2", "uselessLabel": "none"},
+ },
+ },
+ Fits: false,
+ },
+ {
+ Name: "label zone failure domain failed match",
+ Pod: createPodWithVolume("pod_1", "vol_1", "PVC_1"),
+ Node: &v1.Node{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "host1",
+ Labels: map[string]string{metav1.LabelZoneFailureDomain: "no_zone_1", "uselessLabel": "none"},
+ },
+ },
+ Fits: false,
+ },
+ }
+
+ expectedFailureReasons := []algorithm.PredicateFailureReason{ErrVolumeZoneConflict}
+
+ for _, test := range tests {
+ fit := NewVolumeZonePredicate(pvInfo, pvcInfo)
+ node := &schedulercache.NodeInfo{}
+ node.SetNode(test.Node)
+
+ fits, reasons, err := fit(test.Pod, nil, node)
+ if err != nil {
+ t.Errorf("%s: unexpected error: %v", test.Name, err)
+ }
+ if !fits && !reflect.DeepEqual(reasons, expectedFailureReasons) {
+ t.Errorf("%s: unexpected failure reasons: %v, want: %v", test.Name, reasons, expectedFailureReasons)
+ }
+ if fits != test.Fits {
+ t.Errorf("%s: expected %v got %v", test.Name, test.Fits, fits)
+ }
+
+ }
+ }
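For reference, a hedged sketch of how the predicate introduced by the new test is driven. It simply mirrors the loop in TestVolumeZonePredicate above; the helper name checkVolumeZone and the idea of factoring it into a standalone function are illustrative and not part of this commit:

// Illustrative helper (not in the commit): evaluates the volume-zone
// predicate for one pod/node pair, using the same fake PV/PVC listers
// that the test above builds.
func checkVolumeZone(pod *v1.Pod, n *v1.Node, pvInfo FakePersistentVolumeInfo, pvcInfo FakePersistentVolumeClaimInfo) (bool, []algorithm.PredicateFailureReason, error) {
	fit := NewVolumeZonePredicate(pvInfo, pvcInfo)

	nodeInfo := &schedulercache.NodeInfo{}
	nodeInfo.SetNode(n)

	// The second argument is the optional precomputed predicate metadata;
	// the test passes nil here as well.
	return fit(pod, nil, nodeInfo)
}

Called as, for example, checkVolumeZone(createPodWithVolume("pod_1", "vol_1", "PVC_1"), someNode, pvInfo, pvcInfo), where someNode is any node value labeled as in the test cases above.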
Some files were not shown because too many files have changed in this diff.