Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions

vendor/k8s.io/kubernetes/test/BUILD generated vendored Normal file

@@ -0,0 +1,27 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e:all-srcs",
"//test/e2e_node:all-srcs",
"//test/fixtures:all-srcs",
"//test/images:all-srcs",
"//test/integration:all-srcs",
"//test/list:all-srcs",
"//test/soak/cauldron:all-srcs",
"//test/soak/serve_hostnames:all-srcs",
"//test/utils:all-srcs",
],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/OWNERS generated vendored Normal file

@@ -0,0 +1,4 @@
assignees:
- fejta
- ixdy
- spxtr

vendor/k8s.io/kubernetes/test/e2e/BUILD generated vendored Normal file

@@ -0,0 +1,268 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"addon_update.go",
"apparmor.go",
"autoscaling_utils.go",
"cadvisor.go",
"cluster_logging_es.go",
"cluster_logging_gcl.go",
"cluster_logging_utils.go",
"cluster_size_autoscaling.go",
"cluster_upgrade.go",
"cronjob.go",
"daemon_restart.go",
"daemon_set.go",
"dashboard.go",
"density.go",
"deployment.go",
"disruption.go",
"dns.go",
"dns_autoscaling.go",
"dns_configmap.go",
"e2e.go",
"empty.go",
"empty_dir_wrapper.go",
"etcd_failure.go",
"events.go",
"example_cluster_dns.go",
"example_k8petstore.go",
"examples.go",
"federated-ingress.go",
"federated-namespace.go",
"federated-secret.go",
"federated-service.go",
"federation-apiserver.go",
"federation-authn.go",
"federation-daemonset.go",
"federation-deployment.go",
"federation-event.go",
"federation-replicaset.go",
"federation-util.go",
"firewall.go",
"garbage_collector.go",
"generated_clientset.go",
"gke_local_ssd.go",
"gke_node_pools.go",
"ha_master.go",
"horizontal_pod_autoscaling.go",
"ingress.go",
"ingress_utils.go",
"initial_resources.go",
"job.go",
"kibana_logging.go",
"kube_proxy.go",
"kubectl.go",
"kubelet.go",
"kubelet_perf.go",
"limit_range.go",
"load.go",
"logging_soak.go",
"mesos.go",
"monitoring.go",
"namespace.go",
"network_partition.go",
"networking.go",
"networking_perf.go",
"node_problem_detector.go",
"nodeoutofdisk.go",
"opaque_resource.go",
"pd.go",
"persistent_volumes.go",
"persistent_volumes-disruptive.go",
"petset.go",
"pod_gc.go",
"pods.go",
"portforward.go",
"pre_stop.go",
"proxy.go",
"rc.go",
"reboot.go",
"replica_set.go",
"rescheduler.go",
"resize_nodes.go",
"resource_quota.go",
"restart.go",
"scheduler_predicates.go",
"security_context.go",
"service.go",
"service_accounts.go",
"service_latency.go",
"serviceloadbalancers.go",
"ssh.go",
"third-party.go",
"ubernetes_lite.go",
"util_iperf.go",
"volume_provisioning.go",
"volumes.go",
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_clientset:go_default_library",
"//federation/client/clientset_generated/federation_clientset/typed/core/v1:go_default_library",
"//federation/pkg/federation-controller/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/annotations:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/api/v1/service:go_default_library",
"//pkg/apis/apps/v1beta1:go_default_library",
"//pkg/apis/autoscaling/v1:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/batch/v2alpha1:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/rbac/v1alpha1:go_default_library",
"//pkg/apis/storage/util:go_default_library",
"//pkg/apis/storage/v1beta1:go_default_library",
"//pkg/apis/storage/v1beta1/util:go_default_library",
"//pkg/client/cache:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/transport:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/aws:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/petset:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/kubelet/api/v1alpha1/stats:go_default_library",
"//pkg/kubelet/pod:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/metrics:go_default_library",
"//pkg/quota/evaluator/core:go_default_library",
"//pkg/registry/generic/registry:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/flowcontrol:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/logs:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/uuid:go_default_library",
"//pkg/util/version:go_default_library",
"//pkg/util/workqueue:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//plugin/pkg/admission/serviceaccount:go_default_library",
"//test/e2e/chaosmonkey:go_default_library",
"//test/e2e/common:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/e2e/generated:go_default_library",
"//test/images/net/nat:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/aws/aws-sdk-go/aws",
"//vendor:github.com/aws/aws-sdk-go/aws/awserr",
"//vendor:github.com/aws/aws-sdk-go/aws/session",
"//vendor:github.com/aws/aws-sdk-go/service/autoscaling",
"//vendor:github.com/aws/aws-sdk-go/service/ec2",
"//vendor:github.com/elazarl/goproxy",
"//vendor:github.com/ghodss/yaml",
"//vendor:github.com/golang/glog",
"//vendor:github.com/google/cadvisor/info/v1",
"//vendor:github.com/influxdata/influxdb/client",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/ginkgo/config",
"//vendor:github.com/onsi/ginkgo/reporters",
"//vendor:github.com/onsi/gomega",
"//vendor:github.com/stretchr/testify/assert",
"//vendor:golang.org/x/crypto/ssh",
"//vendor:google.golang.org/api/compute/v1",
"//vendor:google.golang.org/api/googleapi",
"//vendor:gopkg.in/inf.v0",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/json",
"//vendor:k8s.io/apimachinery/pkg/util/net",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/util/yaml",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/client-go/kubernetes",
"//vendor:k8s.io/client-go/pkg/api/v1",
"//vendor:k8s.io/client-go/pkg/apis/extensions/v1beta1",
"//vendor:k8s.io/client-go/pkg/apis/policy/v1beta1",
"//vendor:k8s.io/client-go/pkg/util/intstr",
],
)
go_test(
name = "go_default_test",
srcs = [
"e2e_test.go",
"metrics_grabber_test.go",
],
library = ":go_default_library",
tags = [
"automanaged",
"integration",
],
deps = [
"//pkg/api/v1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/metrics:go_default_library",
"//test/e2e/framework:go_default_library",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/gomega",
],
)
genrule(
name = "gen_e2e.test",
testonly = 1,
srcs = [":go_default_test"],
outs = ["e2e.test"],
cmd = "srcs=($(SRCS)); cp $$(dirname $${srcs[0]})/go_default_test $@;",
output_to_bindir = 1,
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//test/e2e/chaosmonkey:all-srcs",
"//test/e2e/common:all-srcs",
"//test/e2e/framework:all-srcs",
"//test/e2e/generated:all-srcs",
"//test/e2e/perftype:all-srcs",
"//test/e2e/testing-manifests:all-srcs",
],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/e2e/addon_update.go generated vendored Normal file

@@ -0,0 +1,438 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"fmt"
"io"
"os"
"strings"
"time"
"golang.org/x/crypto/ssh"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// TODO: it would probably be slightly better to build up the objects
// in the code and then serialize to yaml.
var addon_controller_v1 = `
apiVersion: v1
kind: ReplicationController
metadata:
name: addon-test-v1
namespace: %s
labels:
k8s-app: addon-test
version: v1
kubernetes.io/cluster-service: "true"
spec:
replicas: 2
selector:
k8s-app: addon-test
version: v1
template:
metadata:
labels:
k8s-app: addon-test
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/serve_hostname:v1.4
name: addon-test
ports:
- containerPort: 9376
protocol: TCP
`
var addon_controller_v2 = `
apiVersion: v1
kind: ReplicationController
metadata:
name: addon-test-v2
namespace: %s
labels:
k8s-app: addon-test
version: v2
kubernetes.io/cluster-service: "true"
spec:
replicas: 2
selector:
k8s-app: addon-test
version: v2
template:
metadata:
labels:
k8s-app: addon-test
version: v2
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/serve_hostname:v1.4
name: addon-test
ports:
- containerPort: 9376
protocol: TCP
`
var addon_service_v1 = `
apiVersion: v1
kind: Service
metadata:
name: addon-test
namespace: %s
labels:
k8s-app: addon-test
kubernetes.io/cluster-service: "true"
kubernetes.io/name: addon-test
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-test
`
var addon_service_v2 = `
apiVersion: v1
kind: Service
metadata:
name: addon-test-updated
namespace: %s
labels:
k8s-app: addon-test
kubernetes.io/cluster-service: "true"
kubernetes.io/name: addon-test
newLabel: newValue
spec:
ports:
- port: 9376
protocol: TCP
targetPort: 9376
selector:
k8s-app: addon-test
`
// Wrong label case
var invalid_addon_controller_v1 = `
apiVersion: v1
kind: ReplicationController
metadata:
name: invalid-addon-test-v1
namespace: %s
labels:
k8s-app: invalid-addon-test
version: v1
spec:
replicas: 2
selector:
k8s-app: invalid-addon-test
version: v1
template:
metadata:
labels:
k8s-app: invalid-addon-test
version: v1
kubernetes.io/cluster-service: "true"
spec:
containers:
- image: gcr.io/google_containers/serve_hostname:v1.4
name: invalid-addon-test
ports:
- containerPort: 9376
protocol: TCP
`
// Wrong label case
var invalid_addon_service_v1 = `
apiVersion: v1
kind: Service
metadata:
name: invalid-addon-test
namespace: %s
labels:
k8s-app: invalid-addon-test
kubernetes.io/name: invalid-addon-test
spec:
ports:
- port: 9377
protocol: TCP
targetPort: 9376
selector:
k8s-app: invalid-addon-test
`
// Wrong namespace case
var invalid_addon_service_v2 = `
apiVersion: v1
kind: Service
metadata:
name: invalid-addon-test-v2
namespace: %s
labels:
k8s-app: invalid-addon-test-v2
kubernetes.io/cluster-service: "true"
spec:
ports:
- port: 9377
protocol: TCP
targetPort: 9376
selector:
k8s-app: invalid-addon-test
`
const (
addonTestPollInterval = 3 * time.Second
addonTestPollTimeout = 5 * time.Minute
defaultNsName = v1.NamespaceDefault
addonNsName = "kube-system"
)
type stringPair struct {
data, fileName string
}
var _ = framework.KubeDescribe("Addon update", func() {
var dir string
var sshClient *ssh.Client
f := framework.NewDefaultFramework("addon-update-test")
BeforeEach(func() {
// This test requires:
// - SSH master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
if !framework.ProviderIs("gce") {
return
}
var err error
sshClient, err = getMasterSSHClient()
Expect(err).NotTo(HaveOccurred(), "Failed to get the master SSH client.")
})
AfterEach(func() {
if sshClient != nil {
sshClient.Close()
}
})
// WARNING: the test is not parallel-friendly!
It("should propagate add-on file changes [Slow]", func() {
// This test requires:
// - SSH
// - master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")
// These tests are long, so several cases are squeezed into one scenario.
Expect(sshClient).NotTo(BeNil())
dir = f.Namespace.Name // we use it only to give a unique string for each test execution
temporaryRemotePathPrefix := "addon-test-dir"
temporaryRemotePath := temporaryRemotePathPrefix + "/" + dir // in home directory on kubernetes-master
defer sshExec(sshClient, fmt.Sprintf("rm -rf %s", temporaryRemotePathPrefix)) // ignore the result in cleanup
sshExecAndVerify(sshClient, fmt.Sprintf("mkdir -p %s", temporaryRemotePath))
rcv1 := "addon-controller-v1.yaml"
rcv2 := "addon-controller-v2.yaml"
rcInvalid := "invalid-addon-controller-v1.yaml"
svcv1 := "addon-service-v1.yaml"
svcv2 := "addon-service-v2.yaml"
svcInvalid := "invalid-addon-service-v1.yaml"
svcInvalidv2 := "invalid-addon-service-v2.yaml"
var remoteFiles []stringPair = []stringPair{
{fmt.Sprintf(addon_controller_v1, addonNsName), rcv1},
{fmt.Sprintf(addon_controller_v2, addonNsName), rcv2},
{fmt.Sprintf(addon_service_v1, addonNsName), svcv1},
{fmt.Sprintf(addon_service_v2, addonNsName), svcv2},
{fmt.Sprintf(invalid_addon_controller_v1, addonNsName), rcInvalid},
{fmt.Sprintf(invalid_addon_service_v1, addonNsName), svcInvalid},
{fmt.Sprintf(invalid_addon_service_v2, defaultNsName), svcInvalidv2},
}
for _, p := range remoteFiles {
err := writeRemoteFile(sshClient, p.data, temporaryRemotePath, p.fileName, 0644)
Expect(err).NotTo(HaveOccurred(), "Failed to write file %q at remote path %q with ssh client %+v", p.fileName, temporaryRemotePath, sshClient)
}
// directory on kubernetes-master
destinationDirPrefix := "/etc/kubernetes/addons/addon-test-dir"
destinationDir := destinationDirPrefix + "/" + dir
// cleanup from previous tests
_, _, _, err := sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix))
Expect(err).NotTo(HaveOccurred(), "Failed to remove remote dir %q with ssh client %+v", destinationDirPrefix, sshClient)
defer sshExec(sshClient, fmt.Sprintf("sudo rm -rf %s", destinationDirPrefix)) // ignore result in cleanup
sshExecAndVerify(sshClient, fmt.Sprintf("sudo mkdir -p %s", destinationDir))
By("copy invalid manifests to the destination dir (without kubernetes.io/cluster-service label)")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcInvalid, destinationDir, rcInvalid))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcInvalid, destinationDir, svcInvalid))
// we will verify at the end of the test that the objects weren't created from the invalid manifests
By("copy new manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv1, destinationDir, rcv1))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv1, destinationDir, svcv1))
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test", true)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v1", true)
By("update manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, rcv2, destinationDir, rcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo cp %s/%s %s/%s", temporaryRemotePath, svcv2, destinationDir, svcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv1))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv1))
/*
Note that we have a small race condition here: the kube-addon-updater
may notice that a new rc/service file appeared while the old one is still there.
But that is OK: as long as we don't have rolling updates, the result will be the same.
*/
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test-updated", true)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", true)
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test", false)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v1", false)
By("remove manifests")
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, rcv2))
sshExecAndVerify(sshClient, fmt.Sprintf("sudo rm %s/%s", destinationDir, svcv2))
waitForServiceInAddonTest(f.ClientSet, addonNsName, "addon-test-updated", false)
waitForReplicationControllerInAddonTest(f.ClientSet, addonNsName, "addon-test-v2", false)
By("verify invalid API addons weren't created")
_, err = f.ClientSet.Core().ReplicationControllers(addonNsName).Get("invalid-addon-test-v1", metav1.GetOptions{})
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.Core().Services(addonNsName).Get("invalid-addon-test", metav1.GetOptions{})
Expect(err).To(HaveOccurred())
_, err = f.ClientSet.Core().Services(defaultNsName).Get("invalid-addon-test-v2", metav1.GetOptions{})
Expect(err).To(HaveOccurred())
// invalid addons will be deleted by the deferred function
})
})
func waitForServiceInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) {
framework.ExpectNoError(framework.WaitForService(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
func waitForReplicationControllerInAddonTest(c clientset.Interface, addonNamespace, name string, exist bool) {
framework.ExpectNoError(framework.WaitForReplicationController(c, addonNamespace, name, exist, addonTestPollInterval, addonTestPollTimeout))
}
// TODO use the framework.SSH code, either adding an SCP to it or copying files
// differently.
func getMasterSSHClient() (*ssh.Client, error) {
// Get a signer for the provider.
signer, err := framework.GetSigner(framework.TestContext.Provider)
if err != nil {
return nil, fmt.Errorf("error getting signer for provider %s: '%v'", framework.TestContext.Provider, err)
}
sshUser := os.Getenv("KUBE_SSH_USER")
if sshUser == "" {
sshUser = os.Getenv("USER")
}
config := &ssh.ClientConfig{
User: sshUser,
Auth: []ssh.AuthMethod{ssh.PublicKeys(signer)},
}
host := framework.GetMasterHost() + ":22"
client, err := ssh.Dial("tcp", host, config)
if err != nil {
return nil, fmt.Errorf("error getting SSH client to host %s: '%v'", host, err)
}
return client, err
}
func sshExecAndVerify(client *ssh.Client, cmd string) {
_, _, rc, err := sshExec(client, cmd)
Expect(err).NotTo(HaveOccurred(), "Failed to execute %q with ssh client %+v", cmd, client)
Expect(rc).To(Equal(0), "error return code from executing command on the cluster: %s", cmd)
}
func sshExec(client *ssh.Client, cmd string) (string, string, int, error) {
framework.Logf("Executing '%s' on %v", cmd, client.RemoteAddr())
session, err := client.NewSession()
if err != nil {
return "", "", 0, fmt.Errorf("error creating session to host %s: '%v'", client.RemoteAddr(), err)
}
defer session.Close()
// Run the command.
code := 0
var bout, berr bytes.Buffer
session.Stdout, session.Stderr = &bout, &berr
err = session.Run(cmd)
if err != nil {
// Check whether the command failed to run or didn't complete.
if exiterr, ok := err.(*ssh.ExitError); ok {
// If we got an ExitError and the exit code is nonzero, we'll
// consider the SSH itself successful (just that the command run
// errored on the host).
if code = exiterr.ExitStatus(); code != 0 {
err = nil
}
} else {
// Some other kind of error happened (e.g. an IOError); consider the
// SSH unsuccessful.
err = fmt.Errorf("failed running `%s` on %s: '%v'", cmd, client.RemoteAddr(), err)
}
}
return bout.String(), berr.String(), code, err
}
func writeRemoteFile(sshClient *ssh.Client, data, dir, fileName string, mode os.FileMode) error {
framework.Logf("Writing remote file '%s/%s' on %v", dir, fileName, sshClient.RemoteAddr())
session, err := sshClient.NewSession()
if err != nil {
return fmt.Errorf("error creating session to host %s: '%v'", sshClient.RemoteAddr(), err)
}
defer session.Close()
fileSize := len(data)
pipe, err := session.StdinPipe()
if err != nil {
return err
}
defer pipe.Close()
if err := session.Start(fmt.Sprintf("scp -t %s", dir)); err != nil {
return err
}
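// scp "sink" protocol: "C<mode> <size> <name>\n" announces the file,
// followed by exactly <size> bytes of content and a terminating NUL byte.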
fmt.Fprintf(pipe, "C%#o %d %s\n", mode, fileSize, fileName)
io.Copy(pipe, strings.NewReader(data))
fmt.Fprint(pipe, "\x00")
pipe.Close()
return session.Wait()
}

vendor/k8s.io/kubernetes/test/e2e/apparmor.go generated vendored Normal file

@@ -0,0 +1,177 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
api "k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const (
profilePrefix = "e2e-apparmor-test-"
allowedPath = "/expect_allowed_write"
deniedPath = "/expect_permission_denied"
)
var _ = framework.KubeDescribe("AppArmor", func() {
f := framework.NewDefaultFramework("apparmor")
BeforeEach(func() {
SkipIfAppArmorNotSupported()
LoadAppArmorProfiles(f)
})
It("should enforce an AppArmor profile", func() {
profile := "localhost/" + profilePrefix + f.Namespace.Name
testCmd := fmt.Sprintf(`
if touch %[1]s; then
echo "FAILURE: write to %[1]s should be denied"
exit 1
elif ! touch %[2]s; then
echo "FAILURE: write to %[2]s should be allowed"
exit 2
fi`, deniedPath, allowedPath)
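// The profile is selected per container via the AppArmor annotation,
// i.e. "container.apparmor.security.beta.kubernetes.io/<container-name>".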
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
Name: "test-apparmor",
Annotations: map[string]string{
apparmor.ContainerAnnotationKeyPrefix + "test": profile,
},
},
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", testCmd},
}},
RestartPolicy: api.RestartPolicyNever,
},
}
f.PodClient().Create(pod)
framework.ExpectNoError(framework.WaitForPodSuccessInNamespace(
f.ClientSet, pod.Name, f.Namespace.Name))
framework.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
})
})
func SkipIfAppArmorNotSupported() {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
}
func LoadAppArmorProfiles(f *framework.Framework) {
_, err := createAppArmorProfileCM(f)
framework.ExpectNoError(err)
_, err = createAppArmorProfileLoader(f)
framework.ExpectNoError(err)
}
func createAppArmorProfileCM(f *framework.Framework) (*api.ConfigMap, error) {
profileName := profilePrefix + f.Namespace.Name
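// The generated profile permits file access in general, denies writes to
// deniedPath, and audits (but allows) writes to allowedPath.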
profile := fmt.Sprintf(`#include <tunables/global>
profile %s flags=(attach_disconnected) {
#include <abstractions/base>
file,
deny %s w,
audit %s w,
}
`, profileName, deniedPath, allowedPath)
cm := &api.ConfigMap{
ObjectMeta: api.ObjectMeta{
Name: "apparmor-profiles",
Namespace: f.Namespace.Name,
},
Data: map[string]string{
profileName: profile,
},
}
return f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(cm)
}
func createAppArmorProfileLoader(f *framework.Framework) (*extensions.DaemonSet, error) {
True := true
// Copied from https://github.com/kubernetes/contrib/blob/master/apparmor/loader/example-configmap.yaml
loader := &extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{
Name: "apparmor-loader",
Namespace: f.Namespace.Name,
},
Spec: extensions.DaemonSetSpec{
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Labels: map[string]string{"name": "apparmor-loader"},
},
Spec: api.PodSpec{
Containers: []api.Container{{
Name: "apparmor-loader",
Image: "gcr.io/google_containers/apparmor-loader:0.1",
Args: []string{"-poll", "10s", "/profiles"},
SecurityContext: &api.SecurityContext{
Privileged: &True,
},
VolumeMounts: []api.VolumeMount{{
Name: "sys",
MountPath: "/sys",
ReadOnly: true,
}, {
Name: "apparmor-includes",
MountPath: "/etc/apparmor.d",
ReadOnly: true,
}, {
Name: "profiles",
MountPath: "/profiles",
ReadOnly: true,
}},
}},
Volumes: []api.Volume{{
Name: "sys",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/sys",
},
},
}, {
Name: "apparmor-includes",
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
Path: "/etc/apparmor.d",
},
},
}, {
Name: "profiles",
VolumeSource: api.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{
Name: "apparmor-profiles",
},
},
},
}},
},
},
},
}
return f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Create(loader)
}

vendor/k8s.io/kubernetes/test/e2e/autoscaling_utils.go generated vendored Normal file

@@ -0,0 +1,419 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"strconv"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
)
const (
dynamicConsumptionTimeInSeconds = 30
staticConsumptionTimeInSeconds = 3600
dynamicRequestSizeInMillicores = 20
dynamicRequestSizeInMegabytes = 100
dynamicRequestSizeCustomMetric = 10
port = 80
targetPort = 8080
timeoutRC = 120 * time.Second
startServiceTimeout = time.Minute
startServiceInterval = 5 * time.Second
resourceConsumerImage = "gcr.io/google_containers/resource_consumer:beta4"
resourceConsumerControllerImage = "gcr.io/google_containers/resource_consumer/controller:beta4"
rcIsNil = "ERROR: replicationController = nil"
deploymentIsNil = "ERROR: deployment = nil"
rsIsNil = "ERROR: replicaset = nil"
invalidKind = "ERROR: invalid workload kind for resource consumer"
customMetricName = "QPS"
)
/*
ResourceConsumer is a testing tool that generates a specified amount of CPU or memory usage (warning: memory is not supported).
Typical use case:
rc.ConsumeCPU(600)
// ... check your assumption here
rc.ConsumeCPU(300)
// ... check your assumption here
*/
type ResourceConsumer struct {
name string
controllerName string
kind string
framework *framework.Framework
cpu chan int
mem chan int
customMetric chan int
stopCPU chan int
stopMem chan int
stopCustomMetric chan int
consumptionTimeInSeconds int
sleepTime time.Duration
requestSizeInMillicores int
requestSizeInMegabytes int
requestSizeCustomMetric int
}
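// Consumption targets flow through the cpu/mem/customMetric channels to
// per-resource goroutines, which re-send consume requests every sleepTime
// until the matching stop channel is closed.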
func NewDynamicResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
return newResourceConsumer(name, kind, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, dynamicConsumptionTimeInSeconds,
dynamicRequestSizeInMillicores, dynamicRequestSizeInMegabytes, dynamicRequestSizeCustomMetric, cpuLimit, memLimit, f)
}
// TODO this still defaults to replication controller
func NewStaticResourceConsumer(name string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
return newResourceConsumer(name, kindRC, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, staticConsumptionTimeInSeconds,
initCPUTotal/replicas, initMemoryTotal/replicas, initCustomMetric/replicas, cpuLimit, memLimit, f)
}
/*
NewResourceConsumer creates new ResourceConsumer
initCPUTotal argument is in millicores
initMemoryTotal argument is in megabytes
memLimit argument is in megabytes, memLimit is a maximum amount of memory that can be consumed by a single pod
cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can be consumed by a single pod
*/
func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.InternalClientset, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
rc := &ResourceConsumer{
name: name,
controllerName: name + "-ctrl",
kind: kind,
framework: f,
cpu: make(chan int),
mem: make(chan int),
customMetric: make(chan int),
stopCPU: make(chan int),
stopMem: make(chan int),
stopCustomMetric: make(chan int),
consumptionTimeInSeconds: consumptionTimeInSeconds,
sleepTime: time.Duration(consumptionTimeInSeconds) * time.Second,
requestSizeInMillicores: requestSizeInMillicores,
requestSizeInMegabytes: requestSizeInMegabytes,
requestSizeCustomMetric: requestSizeCustomMetric,
}
go rc.makeConsumeCPURequests()
rc.ConsumeCPU(initCPUTotal)
go rc.makeConsumeMemRequests()
rc.ConsumeMem(initMemoryTotal)
go rc.makeConsumeCustomMetric()
rc.ConsumeCustomMetric(initCustomMetric)
return rc
}
// ConsumeCPU consumes the given number of millicores of CPU.
func (rc *ResourceConsumer) ConsumeCPU(millicores int) {
framework.Logf("RC %s: consume %v millicores in total", rc.name, millicores)
rc.cpu <- millicores
}
// ConsumeMem consumes the given number of megabytes of memory.
func (rc *ResourceConsumer) ConsumeMem(megabytes int) {
framework.Logf("RC %s: consume %v MB in total", rc.name, megabytes)
rc.mem <- megabytes
}
// ConsumeCustomMetric bumps the given custom metric by the given amount.
func (rc *ResourceConsumer) ConsumeCustomMetric(amount int) {
framework.Logf("RC %s: consume custom metric %v in total", rc.name, amount)
rc.customMetric <- amount
}
func (rc *ResourceConsumer) makeConsumeCPURequests() {
defer GinkgoRecover()
sleepTime := time.Duration(0)
millicores := 0
for {
select {
case millicores = <-rc.cpu:
framework.Logf("RC %s: setting consumption to %v millicores in total", rc.name, millicores)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d millicores", rc.name, millicores)
rc.sendConsumeCPURequest(millicores)
sleepTime = rc.sleepTime
case <-rc.stopCPU:
return
}
}
}
func (rc *ResourceConsumer) makeConsumeMemRequests() {
defer GinkgoRecover()
sleepTime := time.Duration(0)
megabytes := 0
for {
select {
case megabytes = <-rc.mem:
framework.Logf("RC %s: setting consumption to %v MB in total", rc.name, megabytes)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d MB", rc.name, megabytes)
rc.sendConsumeMemRequest(megabytes)
sleepTime = rc.sleepTime
case <-rc.stopMem:
return
}
}
}
func (rc *ResourceConsumer) makeConsumeCustomMetric() {
defer GinkgoRecover()
sleepTime := time.Duration(0)
delta := 0
for {
select {
case delta = <-rc.customMetric:
framework.Logf("RC %s: setting bump of metric %s to %d in total", rc.name, customMetricName, delta)
case <-time.After(sleepTime):
framework.Logf("RC %s: sending request to consume %d of custom metric %s", rc.name, delta, customMetricName)
rc.sendConsumeCustomMetric(delta)
sleepTime = rc.sleepTime
case <-rc.stopCustomMetric:
return
}
}
}
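// sendConsumeCPURequest sends POST request for CPU consumption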
func (rc *ResourceConsumer) sendConsumeCPURequest(millicores int) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
framework.ExpectNoError(err)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
req := proxyRequest.Namespace(rc.framework.Namespace.Name).
Context(ctx).
Name(rc.controllerName).
Suffix("ConsumeCPU").
Param("millicores", strconv.Itoa(millicores)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMillicores", strconv.Itoa(rc.requestSizeInMillicores))
framework.Logf("URL: %v", *req.URL())
_, err = req.DoRaw()
framework.ExpectNoError(err)
}
// sendConsumeMemRequest sends POST request for memory consumption
func (rc *ResourceConsumer) sendConsumeMemRequest(megabytes int) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
framework.ExpectNoError(err)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
req := proxyRequest.Namespace(rc.framework.Namespace.Name).
Context(ctx).
Name(rc.controllerName).
Suffix("ConsumeMem").
Param("megabytes", strconv.Itoa(megabytes)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMegabytes", strconv.Itoa(rc.requestSizeInMegabytes))
framework.Logf("URL: %v", *req.URL())
_, err = req.DoRaw()
framework.ExpectNoError(err)
}
// sendConsumeCustomMetric sends POST request for custom metric consumption
func (rc *ResourceConsumer) sendConsumeCustomMetric(delta int) {
proxyRequest, err := framework.GetServicesProxyRequest(rc.framework.ClientSet, rc.framework.ClientSet.Core().RESTClient().Post())
framework.ExpectNoError(err)
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
req := proxyRequest.Namespace(rc.framework.Namespace.Name).
Context(ctx).
Name(rc.controllerName).
Suffix("BumpMetric").
Param("metric", customMetricName).
Param("delta", strconv.Itoa(delta)).
Param("durationSec", strconv.Itoa(rc.consumptionTimeInSeconds)).
Param("requestSizeMetrics", strconv.Itoa(rc.requestSizeCustomMetric))
framework.Logf("URL: %v", *req.URL())
_, err = req.DoRaw()
framework.ExpectNoError(err)
}
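// GetReplicas returns the current replica count of the workload backing this consumer.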
func (rc *ResourceConsumer) GetReplicas() int {
switch rc.kind {
case kindRC:
replicationController, err := rc.framework.ClientSet.Core().ReplicationControllers(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if replicationController == nil {
framework.Failf(rcIsNil)
}
return int(replicationController.Status.Replicas)
case kindDeployment:
deployment, err := rc.framework.ClientSet.Extensions().Deployments(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if deployment == nil {
framework.Failf(deploymentIsNil)
}
return int(deployment.Status.Replicas)
case kindReplicaSet:
rs, err := rc.framework.ClientSet.Extensions().ReplicaSets(rc.framework.Namespace.Name).Get(rc.name, metav1.GetOptions{})
framework.ExpectNoError(err)
if rs == nil {
framework.Failf(rsIsNil)
}
return int(rs.Status.Replicas)
default:
framework.Failf(invalidKind)
}
return 0
}
func (rc *ResourceConsumer) WaitForReplicas(desiredReplicas int) {
timeout := 15 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
if desiredReplicas == rc.GetReplicas() {
framework.Logf("%s: current replicas number is equal to desired replicas number: %d", rc.kind, desiredReplicas)
return
} else {
framework.Logf("%s: current replicas number %d waiting to be %d", rc.kind, rc.GetReplicas(), desiredReplicas)
}
}
framework.Failf("timeout waiting %v for pods size to be %d", timeout, desiredReplicas)
}
func (rc *ResourceConsumer) EnsureDesiredReplicas(desiredReplicas int, timeout time.Duration) {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
actual := rc.GetReplicas()
if desiredReplicas != actual {
framework.Failf("Number of replicas has changed: expected %v, got %v", desiredReplicas, actual)
}
framework.Logf("Number of replicas is as expected")
}
framework.Logf("Number of replicas was stable over %v", timeout)
}
func (rc *ResourceConsumer) CleanUp() {
By(fmt.Sprintf("Removing consuming RC %s", rc.name))
close(rc.stopCPU)
close(rc.stopMem)
close(rc.stopCustomMetric)
// Wait some time to ensure all child goroutines are finished.
time.Sleep(10 * time.Second)
framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.name))
framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.controllerName))
framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
}
func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
_, err := c.Core().Services(ns).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
Selector: map[string]string{
"name": name,
},
},
})
framework.ExpectNoError(err)
rcConfig := testutils.RCConfig{
Client: c,
InternalClient: internalClient,
Image: resourceConsumerImage,
Name: name,
Namespace: ns,
Timeout: timeoutRC,
Replicas: replicas,
CpuRequest: cpuLimitMillis,
CpuLimit: cpuLimitMillis,
MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
MemLimit: memLimitMb * 1024 * 1024,
}
switch kind {
case kindRC:
framework.ExpectNoError(framework.RunRC(rcConfig))
break
case kindDeployment:
dpConfig := testutils.DeploymentConfig{
RCConfig: rcConfig,
}
framework.ExpectNoError(framework.RunDeployment(dpConfig))
break
case kindReplicaSet:
rsConfig := testutils.ReplicaSetConfig{
RCConfig: rcConfig,
}
By(fmt.Sprintf("creating replicaset %s in namespace %s", rsConfig.Name, rsConfig.Namespace))
framework.ExpectNoError(framework.RunReplicaSet(rsConfig))
break
default:
framework.Failf(invalidKind)
}
By("Running controller")
controllerName := name + "-ctrl"
_, err = c.Core().Services(ns).Create(&v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: controllerName,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
Selector: map[string]string{
"name": controllerName,
},
},
})
framework.ExpectNoError(err)
dnsClusterFirst := v1.DNSClusterFirst
controllerRcConfig := testutils.RCConfig{
Client: c,
Image: resourceConsumerControllerImage,
Name: controllerName,
Namespace: ns,
Timeout: timeoutRC,
Replicas: 1,
Command: []string{"/controller", "--consumer-service-name=" + name, "--consumer-service-namespace=" + ns, "--consumer-port=80"},
DNSPolicy: &dnsClusterFirst,
}
framework.ExpectNoError(framework.RunRC(controllerRcConfig))
// Wait for endpoints to propagate for the controller service.
framework.ExpectNoError(framework.WaitForServiceEndpointsNum(
c, ns, controllerName, 1, startServiceInterval, startServiceTimeout))
}

vendor/k8s.io/kubernetes/test/e2e/cadvisor.go generated vendored Normal file

@@ -0,0 +1,87 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Cadvisor", func() {
f := framework.NewDefaultFramework("cadvisor")
It("should be healthy on every node.", func() {
CheckCadvisorHealthOnAllNodes(f.ClientSet, 5*time.Minute)
})
})
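// CheckCadvisorHealthOnAllNodes queries the kubelet /stats/ endpoint (which polls
// cadvisor internally) on every node, retrying until success or retries run out.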
func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) {
// It should be OK to list unschedulable Nodes here.
By("getting list of nodes")
nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err)
var errors []error
// returns maxRetries, sleepDuration
readConfig := func() (int, time.Duration) {
// Read in configuration settings, reasonable defaults.
retry := framework.TestContext.Cadvisor.MaxRetries
if framework.TestContext.Cadvisor.MaxRetries == 0 {
retry = 6
framework.Logf("Overriding default retry value of zero to %d", retry)
}
sleepDurationMS := framework.TestContext.Cadvisor.SleepDurationMS
if sleepDurationMS == 0 {
sleepDurationMS = 10000
framework.Logf("Overriding default milliseconds value of zero to %d", sleepDurationMS)
}
return retry, time.Duration(sleepDurationMS) * time.Millisecond
}
maxRetries, sleepDuration := readConfig()
for {
errors = []error{}
for _, node := range nodeList.Items {
// cadvisor is not accessible directly unless its port (4194 by default) is exposed.
// Here, we access '/stats/' REST endpoint on the kubelet which polls cadvisor internally.
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
By(fmt.Sprintf("Querying stats from node %s using url %s", node.Name, statsResource))
_, err = c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(timeout).Do().Raw()
if err != nil {
errors = append(errors, err)
}
}
if len(errors) == 0 {
return
}
if maxRetries--; maxRetries <= 0 {
break
}
framework.Logf("failed to retrieve kubelet stats -\n %v", errors)
time.Sleep(sleepDuration)
}
framework.Failf("Failed after retrying %d times for cadvisor to be healthy on all nodes. Errors:\n%v", maxRetries, errors)
}

vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/BUILD generated vendored Normal file

@@ -0,0 +1,36 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["chaosmonkey.go"],
tags = ["automanaged"],
deps = ["//vendor:github.com/onsi/ginkgo"],
)
go_test(
name = "go_default_test",
srcs = ["chaosmonkey_test.go"],
library = ":go_default_library",
tags = ["automanaged"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/chaosmonkey.go generated vendored Normal file

@@ -0,0 +1,153 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package chaosmonkey
import . "github.com/onsi/ginkgo"
// Disruption is the type to construct a chaosmonkey with; see Do for more information.
type Disruption func()
// Test is the type to register with a chaosmonkey. A test will run asynchronously across the
// chaosmonkey's Disruption. A Test takes a Semaphore as an argument. It should call sem.Ready()
// once it's ready for the disruption to start and should then wait until sem.StopCh (which is a
// <-chan struct{}) is closed, which signals that the disruption is over. It should then clean up
// and return. See Do and Semaphore for more information.
type Test func(sem *Semaphore)
// Interface can be implemented if you prefer to define tests without dealing with a Semaphore. You
// may define a struct that implements Interface's three methods (Setup, Test, and Teardown) and
// RegisterInterface. See RegisterInterface for more information.
type Interface interface {
Setup()
Test(stopCh <-chan struct{})
Teardown()
}
type chaosmonkey struct {
disruption Disruption
tests []Test
}
// New creates and returns a chaosmonkey, with which the caller should register Tests and call Do.
// See Do for more information.
func New(disruption Disruption) *chaosmonkey {
return &chaosmonkey{
disruption,
[]Test{},
}
}
// Register registers the given Test with the chaosmonkey, so that the test will run over the
// Disruption.
func (cm *chaosmonkey) Register(test Test) {
cm.tests = append(cm.tests, test)
}
// RegisterInterface registers the given Interface with the chaosmonkey, so the chaosmonkey will
// call Setup, Test, and Teardown properly. Test can tell that the Disruption is finished when
// stopCh is closed.
func (cm *chaosmonkey) RegisterInterface(in Interface) {
cm.Register(func(sem *Semaphore) {
in.Setup()
sem.Ready()
in.Test(sem.StopCh)
in.Teardown()
})
}
// Do performs the Disruption while testing the registered Tests. Once the caller has registered
// all Tests with the chaosmonkey, they call Do. Do starts each registered test asynchronously and
// waits for each test to signal that it is ready by calling sem.Ready(). Do will then do the
// Disruption, and when it's complete, close sem.StopCh to signal to the registered Tests that the
// Disruption is over, and wait for all Tests to return.
func (cm *chaosmonkey) Do() {
sems := []*Semaphore{}
// All semaphores have the same StopCh.
stopCh := make(chan struct{})
for _, test := range cm.tests {
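// Re-declare the loop variable so each goroutine captures its own test.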
test := test
sem := newSemaphore(stopCh)
sems = append(sems, sem)
go func() {
defer GinkgoRecover()
defer sem.done()
test(sem)
}()
}
By("Waiting for all async tests to be ready")
for _, sem := range sems {
// Wait for the test to be ready. We have to wait for ready *or done* because a test
// may panic before signaling that it's ready, and we shouldn't block. Since we
// deferred sem.done() above, if a test panics, it's marked as done.
sem.waitForReadyOrDone()
}
By("Starting disruption")
cm.disruption()
By("Disruption complete; stopping async validations")
close(stopCh)
By("Waiting for async validations to complete")
for _, sem := range sems {
sem.waitForDone()
}
}
// Semaphore is taken by a Test and provides: Ready(), for the Test to call when it's ready for the
// disruption to start; and StopCh, the closure of which signals to the Test that the disruption is
// finished.
type Semaphore struct {
readyCh chan struct{}
StopCh <-chan struct{}
doneCh chan struct{}
}
func newSemaphore(stopCh <-chan struct{}) *Semaphore {
// We don't want to block on Ready() or done()
return &Semaphore{
make(chan struct{}, 1),
stopCh,
make(chan struct{}, 1),
}
}
// Ready is called by the Test to signal that the Test is ready for the disruption to start.
func (sem *Semaphore) Ready() {
close(sem.readyCh)
}
// done is an internal method that each test defers, both to wait for all tests to return
// and to sense if a test panicked before calling Ready. See waitForReadyOrDone.
func (sem *Semaphore) done() {
close(sem.doneCh)
}
// We would like to just check if all tests are ready, but if they fail (which Ginkgo implements as
// a panic), they may not have called Ready(). We check done as well to see if the function has
// already returned; if it has, we don't care if it's ready, and just continue.
func (sem *Semaphore) waitForReadyOrDone() {
select {
case <-sem.readyCh:
case <-sem.doneCh:
}
}
// waitForDone is an internal method that blocks until the Test owning this Semaphore returns.
func (sem *Semaphore) waitForDone() {
<-sem.doneCh
}
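// Illustrative sketch of how a caller might drive this package: a hypothetical
// upgrade test registers a validation that runs across a disruption. doUpgrade,
// setupServiceCheck, and checkServiceStillUp are assumed helpers, not functions
// that exist in this package.
//
//	cm := chaosmonkey.New(func() {
//		doUpgrade() // the disruption, e.g. upgrading the master
//	})
//	cm.Register(func(sem *chaosmonkey.Semaphore) {
//		setupServiceCheck()
//		sem.Ready() // ready for the disruption to start
//		for {
//			select {
//			case <-sem.StopCh:
//				return // disruption is over; clean up and return
//			case <-time.After(time.Second):
//				checkServiceStillUp() // validate while disrupted
//			}
//		}
//	})
//	cm.Do()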

vendor/k8s.io/kubernetes/test/e2e/chaosmonkey/chaosmonkey_test.go generated vendored Normal file

@@ -0,0 +1,53 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package chaosmonkey
import (
"sync/atomic"
"testing"
)
func TestDoWithPanic(t *testing.T) {
var counter int64 = 0
cm := New(func() {})
tests := []Test{
// No panic
func(sem *Semaphore) {
defer atomic.AddInt64(&counter, 1)
sem.Ready()
},
// Panic after sem.Ready()
func(sem *Semaphore) {
defer atomic.AddInt64(&counter, 1)
sem.Ready()
panic("Panic after calling sem.Ready()")
},
// Panic before sem.Ready()
func(sem *Semaphore) {
defer atomic.AddInt64(&counter, 1)
panic("Panic before calling sem.Ready()")
},
}
for _, test := range tests {
cm.Register(test)
}
cm.Do()
// Check that all funcs in tests were called.
if int(counter) != len(tests) {
t.Errorf("Expected counter to be %v, but it was %v", len(tests), counter)
}
}

vendor/k8s.io/kubernetes/test/e2e/cluster_logging_es.go generated vendored Normal file

@@ -0,0 +1,290 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"encoding/json"
"fmt"
"strconv"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// graceTime is how long to keep retrying requests to Elasticsearch for status information.
graceTime = 5 * time.Minute
)
var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Feature:Elasticsearch]", func() {
f := framework.NewDefaultFramework("es-logging")
BeforeEach(func() {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// on GCE. Once we are sure that Elasticsearch cluster level logging
// works for other providers we should widen the scope of this test.
framework.SkipUnlessProviderIs("gce")
})
It("should check that logs from containers are ingested into Elasticsearch", func() {
err := checkElasticsearchReadiness(f)
framework.ExpectNoError(err, "Elasticsearch failed to start")
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))
By("Waiting for logs to ingest")
totalMissing := expectedLinesCount
for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(ingestionRetryDelay) {
totalMissing, err = getMissingLinesCountElasticsearch(f, expectedLinesCount)
if err != nil {
framework.Logf("Failed to get missing lines count due to %v", err)
totalMissing = expectedLinesCount
} else if totalMissing > 0 {
framework.Logf("Still missing %d lines", totalMissing)
}
if totalMissing == 0 {
break
}
}
if totalMissing > 0 {
if err := reportLogsFromFluentdPod(f); err != nil {
framework.Logf("Failed to report logs from fluentd pod due to %v", err)
}
}
Expect(totalMissing).To(Equal(0), "Some log lines are still missing")
})
})
// Ensures that elasticsearch is running and ready to serve requests
func checkElasticsearchReadiness(f *framework.Framework) error {
// Check for the existence of the Elasticsearch service.
By("Checking the Elasticsearch service exists.")
s := f.ClientSet.Core().Services(api.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
var err error
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
if _, err = s.Get("elasticsearch-logging", metav1.GetOptions{}); err == nil {
break
}
framework.Logf("Attempt to check for the existence of the Elasticsearch service failed after %v", time.Since(start))
}
Expect(err).NotTo(HaveOccurred())
// Wait for the Elasticsearch pods to enter the running state.
By("Checking to make sure the Elasticsearch pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"}))
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
Expect(err).NotTo(HaveOccurred())
}
By("Checking to make sure we are talking to an Elasticsearch service.")
// Perform a few checks to make sure this looks like an Elasticsearch cluster.
var statusCode int
err = nil
var body []byte
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(10 * time.Second) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
// Query against the root URL for Elasticsearch.
response := proxyRequest.Namespace(api.NamespaceSystem).
Context(ctx).
Name("elasticsearch-logging").
Do()
err = response.Error()
response.StatusCode(&statusCode)
if err != nil {
if ctx.Err() != nil {
framework.Failf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
continue
}
framework.Logf("After %v proxy call to elasticsearch-loigging failed: %v", time.Since(start), err)
continue
}
if int(statusCode) != 200 {
framework.Logf("After %v Elasticsearch cluster has a bad status: %v", time.Since(start), statusCode)
continue
}
break
}
Expect(err).NotTo(HaveOccurred())
if int(statusCode) != 200 {
framework.Failf("Elasticsearch cluster has a bad status: %v", statusCode)
}
// Now assume we really are talking to an Elasticsearch instance.
// Check the cluster health.
By("Checking health of Elasticsearch service.")
healthy := false
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
continue
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
body, err = proxyRequest.Namespace(api.NamespaceSystem).
Context(ctx).
Name("elasticsearch-logging").
Suffix("_cluster/health").
Param("level", "indices").
DoRaw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to get cluster health from elasticsearch: %v", err)
}
continue
}
health := make(map[string]interface{})
err := json.Unmarshal(body, &health)
if err != nil {
framework.Logf("Bad json response from elasticsearch: %v", err)
continue
}
statusIntf, ok := health["status"]
if !ok {
framework.Logf("No status field found in cluster health response: %v", health)
continue
}
status, ok := statusIntf.(string)
if !ok {
framework.Logf("Status field is not a string: %v", statusIntf)
continue
}
if status != "green" && status != "yellow" {
framework.Logf("Cluster health has bad status: %v", health)
continue
}
// All checks passed, so the cluster reports a usable health status.
healthy = true
break
}
if !healthy {
return fmt.Errorf("After %v elasticsearch cluster is not healthy", graceTime)
}
return nil
}
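// For reference: the _cluster/health response consumed by
// checkElasticsearchReadiness above is a JSON object whose top-level "status"
// field is the only value inspected; it looks roughly like (illustration)
//   {"cluster_name": "kubernetes-logging", "status": "green", ...}
// "green" and "yellow" both indicate a usable cluster, while "red" means some
// primary shards are unassigned.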
func getMissingLinesCountElasticsearch(f *framework.Framework, expectedCount int) (int, error) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
if errProxy != nil {
return 0, fmt.Errorf("Failed to get services proxy request: %v", errProxy)
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
// Ask Elasticsearch to return all the log lines that were tagged with the
// pod name. Ask for ten times as many log lines because duplication is possible.
body, err := proxyRequest.Namespace(api.NamespaceSystem).
Context(ctx).
Name("elasticsearch-logging").
Suffix("_search").
// TODO: Change filter to only match records from current test run
// after fluent-plugin-kubernetes_metadata_filter is enabled
// and optimize current query
Param("q", fmt.Sprintf("tag:*%s*", synthLoggerPodName)).
Param("size", strconv.Itoa(expectedCount*10)).
DoRaw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to make proxy call to elasticsearch-logging: %v", err)
}
return 0, fmt.Errorf("Failed to make proxy call to elasticsearch-logging: %v", err)
}
var response map[string]interface{}
err = json.Unmarshal(body, &response)
if err != nil {
return 0, fmt.Errorf("Failed to unmarshal response: %v", err)
}
hits, ok := response["hits"].(map[string]interface{})
if !ok {
return 0, fmt.Errorf("response[hits] not of the expected type: %T", response["hits"])
}
h, ok := hits["hits"].([]interface{})
if !ok {
return 0, fmt.Errorf("Hits not of the expected type: %T", hits["hits"])
}
// Initialize data-structure for observing counts.
counts := make(map[int]int)
// Iterate over the hits and populate the observed array.
for _, e := range h {
l, ok := e.(map[string]interface{})
if !ok {
framework.Logf("Element of hit not of expected type: %T", e)
continue
}
source, ok := l["_source"].(map[string]interface{})
if !ok {
framework.Logf("_source not of the expected type: %T", l["_source"])
continue
}
msg, ok := source["log"].(string)
if !ok {
framework.Logf("Log not of the expected type: %T", source["log"])
continue
}
lineNumber, err := strconv.Atoi(strings.TrimSpace(msg))
if err != nil {
framework.Logf("Log line %s is not a number", msg)
continue
}
if lineNumber < 0 || lineNumber >= expectedCount {
framework.Logf("Number %d is not valid, expected number from range [0, %d)", lineNumber, expectedCount)
continue
}
// Record the observation of a log line
// Duplicates are possible and fine: fluentd provides at-least-once delivery.
counts[lineNumber]++
}
return expectedCount - len(counts), nil
}
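// For reference: the _search response unpacked above has, in abridged form,
// the shape
//   {"hits": {"total": ..., "hits": [{"_source": {"log": "7\n", ...}}, ...]}}
// where each "log" field carries one line written by the synthetic logger.
// A minimal sketch of the same de-duplicating count, as a hypothetical helper
// (not used by the test) operating on plain strings instead of a live query:
func countMissingLinesExample(lines []string, expectedCount int) int {
counts := make(map[int]int)
for _, msg := range lines {
lineNumber, err := strconv.Atoi(strings.TrimSpace(msg))
// Skip anything that is not a line number from the range [0, expectedCount).
if err != nil || lineNumber < 0 || lineNumber >= expectedCount {
continue
}
// Duplicates are fine under at-least-once delivery; only distinct
// line numbers count.
counts[lineNumber]++
}
return expectedCount - len(counts)
}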

135
vendor/k8s.io/kubernetes/test/e2e/cluster_logging_gcl.go generated vendored Normal file
View file

@ -0,0 +1,135 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os/exec"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/json"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
f := framework.NewDefaultFramework("gcl-logging")
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
})
It("should check that logs from containers are ingested in GCL", func() {
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should have successfully waited for pod %s to succeed", synthLoggerPodName))
By("Waiting for logs to ingest")
totalMissing := expectedLinesCount
for start := time.Now(); time.Since(start) < ingestionTimeout; time.Sleep(ingestionRetryDelay) {
var err error
totalMissing, err = getMissingLinesCountGcl(f, synthLoggerPodName, expectedLinesCount)
if err != nil {
framework.Logf("Failed to get missing lines count due to %v", err)
totalMissing = expectedLinesCount
} else if totalMissing > 0 {
framework.Logf("Still missing %d lines", totalMissing)
}
if totalMissing == 0 {
break
}
}
if totalMissing > 0 {
if err := reportLogsFromFluentdPod(f); err != nil {
framework.Logf("Failed to report logs from fluentd pod due to %v", err)
}
}
Expect(totalMissing).To(Equal(0), "Some log lines are still missing")
})
})
func getMissingLinesCountGcl(f *framework.Framework, podName string, expectedCount int) (int, error) {
gclFilter := fmt.Sprintf("resource.labels.pod_id:%s AND resource.labels.namespace_id:%s", podName, f.Namespace.Name)
entries, err := readFilteredEntriesFromGcl(gclFilter)
if err != nil {
return 0, err
}
occurrences := make(map[int]int)
for _, entry := range entries {
lineNumber, err := strconv.Atoi(strings.TrimSpace(entry))
if err != nil {
continue
}
if lineNumber < 0 || lineNumber >= expectedCount {
framework.Logf("Unexpected line number: %d", lineNumber)
} else {
// Duplicates are possible and fine: fluentd provides at-least-once delivery.
occurrences[lineNumber]++
}
}
return expectedCount - len(occurrences), nil
}
type LogEntry struct {
TextPayload string
}
// readFilteredEntriesFromGcl uses the gcloud command to perform a search with
// the given filter, since the GCL API is not easily reachable from outside the cluster.
func readFilteredEntriesFromGcl(filter string) ([]string, error) {
framework.Logf("Reading entries from GCL with filter '%v'", filter)
argList := []string{"beta",
"logging",
"read",
filter,
"--format",
"json",
"--project",
framework.TestContext.CloudConfig.ProjectID,
}
output, err := exec.Command("gcloud", argList...).CombinedOutput()
if err != nil {
return nil, err
}
var entries []*LogEntry
if err = json.Unmarshal(output, &entries); err != nil {
return nil, err
}
framework.Logf("Read %d entries from GCL", len(entries))
var result []string
for _, entry := range entries {
if entry.TextPayload != "" {
result = append(result, entry.TextPayload)
}
}
return result, nil
}
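// A minimal sketch (not used by the test) of decoding the `gcloud ... --format
// json` output shape assumed above, where each entry carries its log line in
// textPayload:
func decodeGclOutputExample() ([]string, error) {
// A hypothetical two-entry payload, standing in for real gcloud output.
sample := []byte(`[{"textPayload": "0"}, {"textPayload": "1"}]`)
var entries []*LogEntry
if err := json.Unmarshal(sample, &entries); err != nil {
return nil, err
}
var lines []string
for _, entry := range entries {
if entry.TextPayload != "" {
lines = append(lines, entry.TextPayload)
}
}
return lines, nil
}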

93
vendor/k8s.io/kubernetes/test/e2e/cluster_logging_utils.go generated vendored Normal file
View file

@ -0,0 +1,93 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"errors"
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// ingestionTimeout is how long to keep retrying to wait for all the
// logs to be ingested.
ingestionTimeout = 10 * time.Minute
// ingestionRetryDelay is how long test should wait between
// two attempts to check for ingestion
ingestionRetryDelay = 25 * time.Second
synthLoggerPodName = "synthlogger"
// expectedLinesCount is the number of log lines emitted (and checked) for each synthetic logging pod.
expectedLinesCount = 100
)
func createSynthLogger(f *framework.Framework, linesCount int) {
f.PodClient().Create(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: synthLoggerPodName,
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Containers: []v1.Container{
{
Name: synthLoggerPodName,
Image: "gcr.io/google_containers/busybox:1.24",
// The loop below emits the integers 0..linesCount-1, one per line, using a backquote subshell for the arithmetic.
Command: []string{"/bin/sh", "-c", fmt.Sprintf("i=0; while [ $i -lt %d ]; do echo $i; i=`expr $i + 1`; done", linesCount)},
},
},
},
})
}
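// For linesCount = 3, the container command built by createSynthLogger above
// expands to (illustration):
//   /bin/sh -c 'i=0; while [ $i -lt 3 ]; do echo $i; i=`expr $i + 1`; done'
// which writes "0", "1" and "2" to stdout, one per line, where the node-level
// fluentd agent picks them up.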
func reportLogsFromFluentdPod(f *framework.Framework) error {
synthLoggerPod, err := f.PodClient().Get(synthLoggerPodName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Failed to get synth logger pod due to %v", err)
}
synthLoggerNodeName := synthLoggerPod.Spec.NodeName
if synthLoggerNodeName == "" {
return errors.New("Synthlogger pod is not assigned to a node")
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
options := v1.ListOptions{LabelSelector: label.String()}
fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return fmt.Errorf("Failed to list fluentd pods due to %v", err)
}
for _, fluentdPod := range fluentdPods.Items {
if fluentdPod.Spec.NodeName == synthLoggerNodeName {
containerName := fluentdPod.Spec.Containers[0].Name
logs, err := framework.GetPodLogs(f.ClientSet, api.NamespaceSystem, fluentdPod.Name, containerName)
if err != nil {
return fmt.Errorf("Failed to get logs from fluentd pod %s due to %v", fluentdPod.Name, err)
}
framework.Logf("Logs from fluentd pod %s:\n%s", fluentdPod.Name, logs)
return nil
}
}
return fmt.Errorf("Failed to find fluentd pod running on node %s", synthLoggerNodeName)
}

640
vendor/k8s.io/kubernetes/test/e2e/cluster_size_autoscaling.go generated vendored Normal file
View file

@ -0,0 +1,640 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"bytes"
"fmt"
"io/ioutil"
"net/http"
"os/exec"
"strconv"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
defaultTimeout = 3 * time.Minute
resizeTimeout = 5 * time.Minute
scaleUpTimeout = 5 * time.Minute
scaleDownTimeout = 15 * time.Minute
gkeEndpoint = "https://test-container.sandbox.googleapis.com"
gkeUpdateTimeout = 15 * time.Minute
)
var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
f := framework.NewDefaultFramework("autoscaling")
var c clientset.Interface
var nodeCount int
var coresPerNode int
var memCapacityMb int
var originalSizes map[string]int
BeforeEach(func() {
c = f.ClientSet
framework.SkipUnlessProviderIs("gce", "gke")
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)
originalSizes = make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := GroupSize(mig)
framework.ExpectNoError(err)
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
Expect(nodeCount).Should(Equal(sum))
if framework.ProviderIs("gke") {
val, err := isAutoscalerEnabled(3)
framework.ExpectNoError(err)
if !val {
err = enableAutoscaler("default-pool", 3, 5)
framework.ExpectNoError(err)
}
}
})
AfterEach(func() {
By(fmt.Sprintf("Restoring initial size of the cluster"))
setMigSizes(originalSizes)
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount, scaleDownTimeout))
})
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated.
eventFound := false
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(v1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
if e.InvolvedObject.Kind == "Pod" && e.Reason == "NotTriggerScaleUp" && strings.Contains(e.Message, "it wouldn't fit if a new node is added") {
By("NotTriggerScaleUp event found")
eventFound = true
break EventsLoop
}
}
}
Expect(eventFound).Should(Equal(true))
// Verify that the cluster size has not changed.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size <= nodeCount }, time.Second))
})
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Verify that the cluster size has increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should increase cluster size if pending pods are small and there is another node pool that is not autoscaled [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with one n1-standard-4 machine")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Verify that the cluster size has increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should disable node pool autoscaling [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with one n1-standard-4 machine")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
framework.ExpectNoError(disableAutoscaler(extraPoolName, 1, 2))
})
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
CreateHostPortPods(f, "host-port", nodeCount+2, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
})
It("should add node to the particular mig [Feature:ClusterSizeAutoscalingScaleUp]", func() {
labels := map[string]string{"cluster-autoscaling-test.special-node": "true"}
By("Finding the smallest MIG")
minMig := ""
minSize := nodeCount
for mig, size := range originalSizes {
if size <= minSize {
minMig = mig
minSize = size
}
}
removeLabels := func(nodesToClean sets.String) {
By("Removing labels from nodes")
updateNodeLabels(c, nodesToClean, nil, labels)
}
nodes, err := GetGroupNodes(minMig)
framework.ExpectNoError(err)
nodesSet := sets.NewString(nodes...)
defer removeLabels(nodesSet)
By(fmt.Sprintf("Annotating nodes of the smallest MIG(%s): %v", minMig, nodes))
updateNodeLabels(c, nodesSet, labels, nil)
CreateNodeSelectorPods(f, "node-selector", minSize+1, labels, false)
By("Waiting for new node to appear and annotating it")
WaitForGroupSize(minMig, int32(minSize+1))
// Verify that the cluster size has increased.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
newNodes, err := GetGroupNodes(minMig)
framework.ExpectNoError(err)
newNodesSet := sets.NewString(newNodes...)
newNodesSet.Delete(nodes...)
if len(newNodesSet) > 1 {
By(fmt.Sprintf("Spotted following new nodes in %s: %v", minMig, newNodesSet))
glog.Infof("Usually only 1 new node is expected, investigating")
glog.Infof("Kubectl:%s\n", framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
if output, err := exec.Command("gcloud", "compute", "instances", "list",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
glog.Infof("Gcloud compute instances list: %s", output)
} else {
glog.Errorf("Failed to get instances list: %v", err)
}
for newNode := range newNodesSet {
if output, err := exec.Command("gcloud", "compute", "instances", "describe",
newNode,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output(); err == nil {
glog.Infof("Gcloud compute instances describe: %s", output)
} else {
glog.Errorf("Failed to get instances describe: %v", err)
}
}
// TODO: possibly remove broken node from newNodesSet to prevent removeLabel from crashing.
// However at this moment we DO WANT it to crash so that we don't check all test runs for the
// rare behavior, but only the broken ones.
}
By(fmt.Sprintf("New nodes: %v\n", newNodesSet))
registeredNodes := sets.NewString()
for nodeName := range newNodesSet {
node, err := f.ClientSet.Core().Nodes().Get(nodeName, metav1.GetOptions{})
if err == nil && node != nil {
registeredNodes.Insert(nodeName)
} else {
glog.Errorf("Failed to get node %v: %v", nodeName, err)
}
}
By(fmt.Sprintf("Setting labels for registered new nodes: %v", registeredNodes.List()))
updateNodeLabels(c, registeredNodes, labels, nil)
defer removeLabels(registeredNodes)
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector"))
})
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
framework.SkipUnlessProviderIs("gke")
By("Creating new node-pool with one n1-standard-4 machine")
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-4", 1)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+1, resizeTimeout))
framework.ExpectNoError(enableAutoscaler(extraPoolName, 1, 2))
By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Apparently the GKE master is restarted a couple of minutes after the node
// pool is added, resetting all the timers in the scale-down code. Adding 5
// extra minutes to work around this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(framework.WaitForClusterSize(c, nodeCount+2, scaleUpTimeout+5*time.Minute))
})
It("should correctly scale down after a node is not needed [Feature:ClusterSizeAutoscalingScaleDown]", func() {
By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + 2
increasedSize += val + 2
}
setMigSizes(newSizes)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize }, scaleUpTimeout))
By("Some node should be removed")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize }, scaleDownTimeout))
})
It("should correctly scale down after a node is not needed when there is non autoscaled pool[Feature:ClusterSizeAutoscalingScaleDown]", func() {
framework.SkipUnlessProviderIs("gke")
By("Manually increase cluster size")
increasedSize := 0
newSizes := make(map[string]int)
for key, val := range originalSizes {
newSizes[key] = val + 2
increasedSize += val + 2
}
setMigSizes(newSizes)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize }, scaleUpTimeout))
const extraPoolName = "extra-pool"
addNodePool(extraPoolName, "n1-standard-1", 3)
defer deleteNodePool(extraPoolName)
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= increasedSize+3 }, scaleUpTimeout))
By("Some node should be removed")
// Apparently the GKE master is restarted a couple of minutes after the node
// pool is added, resetting all the timers in the scale-down code. Adding 10
// extra minutes to work around this issue.
// TODO: Remove the extra time when GKE restart is fixed.
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size < increasedSize+3 }, scaleDownTimeout+10*time.Minute))
})
})
func getGKEClusterUrl() string {
out, err := exec.Command("gcloud", "auth", "print-access-token").Output()
framework.ExpectNoError(err)
token := strings.Replace(string(out), "\n", "", -1)
return fmt.Sprintf("%s/v1/projects/%s/zones/%s/clusters/%s?access_token=%s",
gkeEndpoint,
framework.TestContext.CloudConfig.ProjectID,
framework.TestContext.CloudConfig.Zone,
framework.TestContext.CloudConfig.Cluster,
token)
}
func isAutoscalerEnabled(expectedMinNodeCountInTargetPool int) (bool, error) {
resp, err := http.Get(getGKEClusterUrl())
if err != nil {
return false, err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return false, err
}
strBody := string(body)
glog.Infof("Cluster config %s", strBody)
if strings.Contains(strBody, "\"minNodeCount\": "+strconv.Itoa(expectedMinNodeCountInTargetPool)) {
return true, nil
}
return false, nil
}
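// For reference: isAutoscalerEnabled relies on a plain substring match, so it
// assumes the cluster config body rendered above contains a fragment shaped
// roughly like (illustration)
//   "autoscaling": { "enabled": true, "minNodeCount": 3, "maxNodeCount": 5 }
// with exactly one space after the colon; a formatting change on the API side
// would silently break the check.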
func enableAutoscaler(nodePool string, minCount, maxCount int) error {
if nodePool == "default-pool" {
glog.Infof("Using gcloud to enable autoscaling for pool %s", nodePool)
output, err := exec.Command("gcloud", "alpha", "container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--enable-autoscaling",
"--min-nodes="+strconv.Itoa(minCount),
"--max-nodes="+strconv.Itoa(maxCount),
"--node-pool="+nodePool,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output()
if err != nil {
return fmt.Errorf("Failed to enable autoscaling: %v", err)
}
glog.Infof("Config update result: %s", output)
} else {
glog.Infof("Using direct api access to enable autoscaling for pool %s", nodePool)
updateRequest := "{" +
" \"update\": {" +
" \"desiredNodePoolId\": \"" + nodePool + "\"," +
" \"desiredNodePoolAutoscaling\": {" +
" \"enabled\": \"true\"," +
" \"minNodeCount\": \"" + strconv.Itoa(minCount) + "\"," +
" \"maxNodeCount\": \"" + strconv.Itoa(maxCount) + "\"" +
" }" +
" }" +
"}"
url := getGKEClusterUrl()
glog.Infof("Using gke api url %s", url)
putResult, err := doPut(url, updateRequest)
if err != nil {
return fmt.Errorf("Failed to put %s: %v", url, err)
}
glog.Infof("Config update result: %s", putResult)
}
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
if val, err := isAutoscalerEnabled(minCount); err == nil && val {
return nil
}
}
return fmt.Errorf("autoscaler not enabled")
}
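// The hand-assembled update request in enableAutoscaler above is equivalent
// to this pretty-printed JSON (values shown for nodePool="extra-pool",
// minCount=1, maxCount=2):
//   {
//     "update": {
//       "desiredNodePoolId": "extra-pool",
//       "desiredNodePoolAutoscaling": {
//         "enabled": "true",
//         "minNodeCount": "1",
//         "maxNodeCount": "2"
//       }
//     }
//   }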
func disableAutoscaler(nodePool string, minCount, maxCount int) error {
if nodePool == "default-pool" {
glog.Infof("Using gcloud to disable autoscaling for pool %s", nodePool)
output, err := exec.Command("gcloud", "alpha", "container", "clusters", "update", framework.TestContext.CloudConfig.Cluster,
"--no-enable-autoscaling",
"--node-pool="+nodePool,
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone).Output()
if err != nil {
return fmt.Errorf("Failed to enable autoscaling: %v", err)
}
glog.Infof("Config update result: %s", output)
} else {
glog.Infof("Using direct api access to disable autoscaling for pool %s", nodePool)
updateRequest := "{" +
" \"update\": {" +
" \"desiredNodePoolId\": \"" + nodePool + "\"," +
" \"desiredNodePoolAutoscaling\": {" +
" \"enabled\": \"false\"," +
" }" +
" }" +
"}"
url := getGKEClusterUrl()
glog.Infof("Using gke api url %s", url)
putResult, err := doPut(url, updateRequest)
if err != nil {
return fmt.Errorf("Failed to put %s: %v", url, err)
}
glog.Infof("Config update result: %s", putResult)
}
for startTime := time.Now(); startTime.Add(gkeUpdateTimeout).After(time.Now()); time.Sleep(30 * time.Second) {
if val, err := isAutoscalerEnabled(minCount); err == nil && !val {
return nil
}
}
return fmt.Errorf("autoscaler still enabled")
}
func addNodePool(name string, machineType string, numNodes int) {
output, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create", name, "--quiet",
"--machine-type="+machineType,
"--num-nodes="+strconv.Itoa(numNodes),
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
framework.ExpectNoError(err)
glog.Infof("Creating node-pool %s: %s", name, output)
}
func deleteNodePool(name string) {
glog.Infof("Deleting node pool %s", name)
output, err := exec.Command("gcloud", "alpha", "container", "node-pools", "delete", name, "--quiet",
"--project="+framework.TestContext.CloudConfig.ProjectID,
"--zone="+framework.TestContext.CloudConfig.Zone,
"--cluster="+framework.TestContext.CloudConfig.Cluster).CombinedOutput()
if err != nil {
glog.Infof("Error: %v", err)
}
glog.Infof("Node-pool deletion output: %s", output)
}
func doPut(url, content string) (string, error) {
req, err := http.NewRequest("PUT", url, bytes.NewBuffer([]byte(content)))
if err != nil {
return "", err
}
req.Header.Set("Content-Type", "application/json")
client := &http.Client{}
resp, err := client.Do(req)
if err != nil {
return "", err
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", err
}
strBody := string(body)
return strBody, nil
}
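// Example (hypothetical) call, matching how doPut is used above:
//   result, err := doPut(getGKEClusterUrl(), updateRequest)
// The response body is returned verbatim so that callers can log it.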
func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nodeSelector map[string]string, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
NodeSelector: nodeSelector,
}
err := framework.RunRC(*config)
if expectRunning {
framework.ExpectNoError(err)
}
}
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
HostPorts: map[string]int{"port1": 4321},
}
err := framework.RunRC(*config)
if expectRunning {
framework.ExpectNoError(err)
}
}
func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}
func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: id,
Namespace: f.Namespace.Name,
Timeout: defaultTimeout,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: replicas,
MemRequest: request,
}
err := framework.RunRC(*config)
if expectRunning {
framework.ExpectNoError(err)
}
}
// WaitForClusterSizeFunc waits until the cluster size satisfies the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
}.AsSelector().String()})
if err != nil {
glog.Warningf("Failed to list nodes: %v", err)
continue
}
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
framework.FilterNodes(nodes, func(node v1.Node) bool {
return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
if numNodes == numReady && sizeFunc(numReady) {
glog.Infof("Cluster has reached the desired size")
return nil
}
glog.Infof("Waiting for cluster, current size %d, not ready nodes %d", numNodes, numNodes-numReady)
}
return fmt.Errorf("timeout waiting %v for appropriate cluster size", timeout)
}
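// Example usage, mirroring the scale-up tests above: wait until at least one
// node beyond the original count is schedulable and ready.
//   framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
//       func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))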
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
pods, err := c.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
notready = make([]string, 0)
for _, pod := range pods.Items {
ready := false
for _, c := range pod.Status.Conditions {
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
ready = true
}
}
// Failed pods in this context generally mean that they have been
// double scheduled onto a node, but then failed a constraint check.
if pod.Status.Phase == v1.PodFailed {
glog.Warningf("Pod has failed: %v", pod)
}
if !ready && pod.Status.Phase != v1.PodFailed {
notready = append(notready, pod.Name)
}
}
if len(notready) == 0 {
glog.Infof("All pods ready")
return nil
}
glog.Infof("Some pods are not ready yet: %v", notready)
}
glog.Info("Timeout on waiting for pods being ready")
glog.Info(framework.RunKubectlOrDie("get", "pods", "-o", "json", "--all-namespaces"))
glog.Info(framework.RunKubectlOrDie("get", "nodes", "-o", "json"))
// Some pods are still not running.
return fmt.Errorf("Some pods are still not running: %v", notready)
}
func setMigSizes(sizes map[string]int) {
for mig, desiredSize := range sizes {
currentSize, err := GroupSize(mig)
framework.ExpectNoError(err)
if desiredSize != currentSize {
By(fmt.Sprintf("Setting size of %s to %d", mig, desiredSize))
err = ResizeGroup(mig, int32(desiredSize))
framework.ExpectNoError(err)
}
}
}

230
vendor/k8s.io/kubernetes/test/e2e/cluster_upgrade.go generated vendored Normal file
View file

@ -0,0 +1,230 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"path"
"strings"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// TODO(mikedanese): Add setup, validate, and teardown for:
// - secrets
// - volumes
// - persistent volumes
var _ = framework.KubeDescribe("Upgrade [Feature:Upgrade]", func() {
f := framework.NewDefaultFramework("cluster-upgrade")
framework.KubeDescribe("master upgrade", func() {
It("should maintain responsive services [Feature:MasterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
})
cm.Do()
})
})
framework.KubeDescribe("node upgrade", func() {
It("should maintain a functioning cluster [Feature:NodeUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceUpBeforeAndAfter(f, sem)
})
cm.Do()
})
It("should maintain responsive services [Feature:ExperimentalNodeUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
})
cm.Do()
})
})
framework.KubeDescribe("cluster upgrade", func() {
It("should maintain a functioning cluster [Feature:ClusterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceUpBeforeAndAfter(f, sem)
})
cm.Do()
})
It("should maintain responsive services [Feature:ExperimentalClusterUpgrade]", func() {
cm := chaosmonkey.New(func() {
v, err := realVersion(framework.TestContext.UpgradeTarget)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.MasterUpgrade(v))
framework.ExpectNoError(checkMasterVersion(f.ClientSet, v))
framework.ExpectNoError(framework.NodeUpgrade(f, v, framework.TestContext.UpgradeImage))
framework.ExpectNoError(checkNodesVersions(f.ClientSet, v))
})
cm.Register(func(sem *chaosmonkey.Semaphore) {
// Close over f.
testServiceRemainsUp(f, sem)
})
cm.Do()
})
})
})
// realVersion turns a version constant s into a version string deployable on
// GKE. See hack/get-build.sh for more information.
func realVersion(s string) (string, error) {
framework.Logf("Getting real version for %q", s)
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
if err != nil {
return v, err
}
framework.Logf("Version for %q is %q", s, v)
return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil
}
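// Example (illustrative): realVersion("ci/latest") shells out to
// hack/get-build.sh -v ci/latest; if that prints
// "v1.6.0-alpha.0.2461+6ba12ae0f53b5a", the returned string is the same
// version with the leading "v" trimmed.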
func testServiceUpBeforeAndAfter(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testService(f, sem, false)
}
func testServiceRemainsUp(f *framework.Framework, sem *chaosmonkey.Semaphore) {
testService(f, sem, true)
}
// testService is a helper for testServiceUpBeforeAndAfter and testServiceRemainsUp with a flag for testDuringDisruption
//
// TODO(ihmccreery) remove this abstraction once testServiceUpBeforeAndAfter is no longer needed, because node upgrades
// maintain a responsive service.
func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringDisruption bool) {
// Setup
serviceName := "service-test"
jig := framework.NewServiceTestJig(f.ClientSet, serviceName)
// nodeIP := framework.PickNodeIP(jig.Client) // for later
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
// TODO: it's weird that we have to do this and then wait for
// WaitForLoadBalancer, which changes tcpService.
tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeLoadBalancer
})
tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, framework.LoadBalancerCreateTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
// Get info to hit it with
tcpIngressIP := framework.GetIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
svcPort := int(tcpService.Spec.Ports[0].Port)
By("creating pod to be part of service " + serviceName)
// TODO newRCTemplate only allows for the creation of one replica... that probably won't
// work so well.
jig.RunOrFail(f.Namespace.Name, nil)
// Hit it once before considering ourselves ready
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
sem.Ready()
if testDuringDisruption {
// Continuous validation
wait.Until(func() {
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.Poll)
}, framework.Poll, sem.StopCh)
} else {
// Block until chaosmonkey is done
By("waiting for upgrade to finish without checking if service remains up")
<-sem.StopCh
}
// Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, framework.LoadBalancerLagTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
}
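// The chaosmonkey protocol used above, for reference: sem.Ready() signals
// that setup and the first probe succeeded and the disruption (the upgrade)
// may begin; sem.StopCh is closed when the disruption finishes, after which
// the final probes and sanity checks run.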
func checkMasterVersion(c clientset.Interface, want string) error {
framework.Logf("Checking master version")
v, err := c.Discovery().ServerVersion()
if err != nil {
return fmt.Errorf("checkMasterVersion() couldn't get the master version: %v", err)
}
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4
// got looks like: v0.19.3-815-g50e67d4034e858-dirty
got := strings.TrimPrefix(v.GitVersion, "v")
if !strings.HasPrefix(got, want) {
return fmt.Errorf("master had kube-apiserver version %s which does not start with %s",
got, want)
}
framework.Logf("Master is at version %s", want)
return nil
}
func checkNodesVersions(cs clientset.Interface, want string) error {
l := framework.GetReadySchedulableNodesOrDie(cs)
for _, n := range l.Items {
// We do prefix trimming and then matching because:
// want looks like: 0.19.3-815-g50e67d4
// kv/kvp look like: v0.19.3-815-g50e67d4034e858-dirty
kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
if !strings.HasPrefix(kv, want) {
return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
n.ObjectMeta.Name, kv, want)
}
if !strings.HasPrefix(kpv, want) {
return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
n.ObjectMeta.Name, kpv, want)
}
}
return nil
}

70
vendor/k8s.io/kubernetes/test/e2e/common/BUILD generated vendored Normal file
View file

@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"configmap.go",
"container_probe.go",
"docker_containers.go",
"downward_api.go",
"downwardapi_volume.go",
"empty_dir.go",
"expansion.go",
"host_path.go",
"init_container.go",
"kubelet_etc_hosts.go",
"networking.go",
"pods.go",
"privileged.go",
"secrets.go",
"sysctl.go",
"util.go",
"volumes.go",
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/pod:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/kubelet:go_default_library",
"//pkg/kubelet/events:go_default_library",
"//pkg/kubelet/sysctl:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/uuid:go_default_library",
"//test/e2e/framework:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/gomega",
"//vendor:golang.org/x/net/websocket",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

495
vendor/k8s.io/kubernetes/test/e2e/common/configmap.go generated vendored Normal file
View file

@ -0,0 +1,495 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("ConfigMap", func() {
f := framework.NewDefaultFramework("configmap")
It("should be consumable from pods in volume [Conformance] [Volume]", func() {
doConfigMapE2EWithoutMappings(f, 0, 0, nil)
})
It("should be consumable from pods in volume with defaultMode set [Conformance] [Volume]", func() {
defaultMode := int32(0400)
doConfigMapE2EWithoutMappings(f, 0, 0, &defaultMode)
})
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Feature:FSGroup] [Volume]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
doConfigMapE2EWithoutMappings(f, 1000, 1001, &defaultMode)
})
It("should be consumable from pods in volume as non-root [Conformance] [Volume]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume as non-root with FSGroup [Feature:FSGroup] [Volume]", func() {
doConfigMapE2EWithoutMappings(f, 1000, 1001, nil)
})
It("should be consumable from pods in volume with mappings [Conformance] [Volume]", func() {
doConfigMapE2EWithMappings(f, 0, 0, nil)
})
It("should be consumable from pods in volume with mappings and Item mode set[Conformance] [Volume]", func() {
mode := int32(0400)
doConfigMapE2EWithMappings(f, 0, 0, &mode)
})
It("should be consumable from pods in volume with mappings as non-root [Conformance] [Volume]", func() {
doConfigMapE2EWithMappings(f, 1000, 0, nil)
})
It("should be consumable from pods in volume with mappings as non-root with FSGroup [Feature:FSGroup] [Volume]", func() {
doConfigMapE2EWithMappings(f, 1000, 1001, nil)
})
It("updates should be reflected in volume [Conformance] [Volume]", func() {
// We may have to wait for a full sync period to elapse before the
// Kubelet projects the update into the volume and the container picks
// it up. This timeout is based on the default Kubelet sync period (1
// minute) plus additional time for fudge factor.
const podLogTimeout = 300 * time.Second
name := "configmap-test-upd-" + string(uuid.NewUUID())
volumeName := "configmap-volume"
volumeMountPath := "/etc/configmap-volume"
containerName := "configmap-volume-test"
configMap := &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
},
}
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
f.PodClient().CreateSync(pod)
pollLogs := func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
}
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-1"))
By(fmt.Sprintf("Updating configmap %v", configMap.Name))
configMap.ResourceVersion = "" // to force update
configMap.Data["data-1"] = "value-2"
_, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Update(configMap)
Expect(err).NotTo(HaveOccurred(), "Failed to update configmap %q in namespace %q", configMap.Name, f.Namespace.Name)
By("waiting to observe update in volume")
Eventually(pollLogs, podLogTimeout, framework.Poll).Should(ContainSubstring("value-2"))
})
It("should be consumable via environment variable [Conformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "CONFIG_DATA_1",
ValueFrom: &v1.EnvVarSource{
ConfigMapKeyRef: &v1.ConfigMapKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"CONFIG_DATA_1=value-1",
})
})
It("should be consumable via the environment [Conformance]", func() {
name := "configmap-test-" + string(uuid.NewUUID())
configMap := newEnvFromConfigMap(f, name)
By(fmt.Sprintf("Creating configMap %v/%v", f.Namespace.Name, configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
EnvFrom: []v1.EnvFromSource{
{
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
{
Prefix: "p_",
ConfigMapRef: &v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: name}},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"data_1=value-1", "data_2=value-2", "data_3=value-3",
"p_data_1=value-1", "p_data_2=value-2", "p_data_3=value-3",
})
})
It("should be consumable in multiple volumes in the same pod [Conformance] [Volume]", func() {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
volumeName2 = "configmap-volume-2"
volumeMountPath2 = "/etc/configmap-volume-2"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume configMaps", pod, 0, []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
})
})
})
func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data-1": "value-1",
"data-2": "value-2",
"data-3": "value-3",
},
}
}
func newEnvFromConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
return &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string]string{
"data_1": "value-1",
"data_2": "value-2",
"data_3": "value-3",
},
}
}
func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, defaultMode *int32) {
var (
name = "configmap-test-volume-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if uid != 0 {
pod.Spec.SecurityContext.RunAsUser = &uid
}
if fsGroup != 0 {
pod.Spec.SecurityContext.FSGroup = &fsGroup
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.ConfigMap.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
output := []string{
"content of file \"/etc/configmap-volume/data-1\": value-1",
"mode of file \"/etc/configmap-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}
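// For reference, the mode strings the mounttest container is expected to
// print for the modes exercised by these tests:
//   os.FileMode(0644).String() == "-rw-r--r--"   (the default)
//   os.FileMode(0400).String() == "-r--------"
//   os.FileMode(0440).String() == "-r--r-----"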
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
var (
name = "configmap-test-volume-map-" + string(uuid.NewUUID())
volumeName = "configmap-volume"
volumeMountPath = "/etc/configmap-volume"
configMap = newConfigMap(f, name)
)
By(fmt.Sprintf("Creating configMap with name %s", configMap.Name))
var err error
if configMap, err = f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap); err != nil {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Items: []v1.KeyToPath{
{
Key: "data-2",
Path: "path/to/data-2",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if uid != 0 {
pod.Spec.SecurityContext.RunAsUser = &uid
}
if fsGroup != 0 {
pod.Spec.SecurityContext.FSGroup = &fsGroup
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.ConfigMap.Items[0].Mode = itemMode
} else {
mode := int32(0644)
itemMode = &mode
}
// Just check file mode if fsGroup is not set. If fsGroup is set, the
// final mode is adjusted and we are not testing that case.
output := []string{
"content of file \"/etc/configmap-volume/path/to/data-2\": value-2",
}
if fsGroup == 0 {
modeString := fmt.Sprintf("%v", os.FileMode(*itemMode))
output = append(output, "mode of file \"/etc/configmap-volume/path/to/data-2\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
}

406
vendor/k8s.io/kubernetes/test/e2e/common/container_probe.go generated vendored Normal file
View file

@ -0,0 +1,406 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
probTestContainerName = "test-webserver"
probTestInitialDelaySeconds = 15
defaultObservationTimeout = time.Minute * 2
)
var _ = framework.KubeDescribe("Probing container", func() {
f := framework.NewDefaultFramework("container-probe")
var podClient *framework.PodClient
probe := webserverProbeBuilder{}
BeforeEach(func() {
podClient = f.PodClient()
})
It("with readiness probe should not be ready before initial delay and never restart [Conformance]", func() {
p := podClient.Create(makePodSpec(probe.withInitialDelay().build(), nil))
f.WaitForPodReady(p.Name)
p, err := podClient.Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
Expect(isReady).To(BeTrue(), "pod should be ready")
// We assume the pod became ready when the container became ready. This
// is true for a single container pod.
readyTime, err := getTransitionTimeForReadyCondition(p)
framework.ExpectNoError(err)
startedTime, err := getContainerStartedTime(p, probTestContainerName)
framework.ExpectNoError(err)
framework.Logf("Container started at %v, pod became ready at %v", startedTime, readyTime)
initialDelay := probTestInitialDelaySeconds * time.Second
if readyTime.Sub(startedTime) < initialDelay {
framework.Failf("Pod became ready before it's %v initial delay", initialDelay)
}
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
It("with readiness probe that fails should never be ready and never restart [Conformance]", func() {
p := podClient.Create(makePodSpec(probe.withFailing().build(), nil))
Consistently(func() (bool, error) {
p, err := podClient.Get(p.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
return v1.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
isReady, err := testutils.PodRunningReady(p)
framework.ExpectNoError(err)
Expect(isReady).NotTo(BeTrue(), "pod should not be ready")
restartCount := getRestartCount(p)
Expect(restartCount == 0).To(BeTrue(), "pod should have a restart count of 0 but got %v", restartCount)
})
It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
It("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 0, defaultObservationTimeout)
})
It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
// Slow by design (5 min)
It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 5,
FailureThreshold: 1,
},
},
},
},
}, 5, time.Minute*5)
})
It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/nginx-slim:0.7",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(80),
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 5,
FailureThreshold: 1,
},
},
},
},
}, 0, defaultObservationTimeout)
})
It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() {
// TODO: enable this test once the default exec handler supports timeout.
Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
runLivenessTest(f, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"/bin/sh", "-c", "sleep 10"},
},
},
InitialDelaySeconds: 15,
TimeoutSeconds: 1,
FailureThreshold: 1,
},
},
},
},
}, 1, defaultObservationTimeout)
})
})
func getContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error) {
for _, status := range p.Status.ContainerStatuses {
if status.Name != containerName {
continue
}
if status.State.Running == nil {
return time.Time{}, fmt.Errorf("Container is not running")
}
return status.State.Running.StartedAt.Time, nil
}
return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
}
func getTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
for _, cond := range p.Status.Conditions {
if cond.Type == v1.PodReady {
return cond.LastTransitionTime.Time, nil
}
}
return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
}
func getRestartCount(p *v1.Pod) int {
count := 0
for _, containerStatus := range p.Status.ContainerStatuses {
count += int(containerStatus.RestartCount)
}
return count
}
func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: probTestContainerName,
Image: "gcr.io/google_containers/test-webserver:e2e",
LivenessProbe: livenessProbe,
ReadinessProbe: readinessProbe,
},
},
},
}
return pod
}
type webserverProbeBuilder struct {
failing bool
initialDelay bool
}
func (b webserverProbeBuilder) withFailing() webserverProbeBuilder {
b.failing = true
return b
}
func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {
b.initialDelay = true
return b
}
func (b webserverProbeBuilder) build() *v1.Probe {
probe := &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/",
},
},
}
if b.initialDelay {
probe.InitialDelaySeconds = probTestInitialDelaySeconds
}
if b.failing {
probe.HTTPGet.Port = intstr.FromInt(81)
}
return probe
}
func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient()
ns := f.Namespace.Name
Expect(pod.Spec.Containers).NotTo(BeEmpty())
containerName := pod.Spec.Containers[0].Name
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
// Wait until the pod is not pending. (Here we need to check for something other than
// 'Pending' other than checking for 'Running', since when failures occur, we go to
// 'Terminated' which can cause indefinite blocking.)
framework.ExpectNoError(framework.WaitForPodNotPending(f.ClientSet, ns, pod.Name, pod.ResourceVersion),
fmt.Sprintf("starting pod %s in namespace %s", pod.Name, ns))
framework.Logf("Started pod %s in namespace %s", pod.Name, ns)
// Check the pod's current state and verify that restartCount is present.
By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
// Wait for the restart state to be as desired.
deadline := time.Now().Add(timeout)
lastRestartCount := initialRestartCount
observedRestarts := int32(0)
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, pod.Name, restartCount, time.Since(start))
if restartCount < lastRestartCount {
framework.Failf("Restart count should increment monotonically: restart cont of pod %s/%s changed from %d to %d",
ns, pod.Name, lastRestartCount, restartCount)
}
}
observedRestarts = restartCount - initialRestartCount
if expectNumRestarts > 0 && int(observedRestarts) >= expectNumRestarts {
// Stop if we have observed more than expectNumRestarts restarts.
break
}
lastRestartCount = restartCount
}
// If we expected 0 restarts, fail if observed any restart.
// If we expected n restarts (n > 0), fail if we observed < n restarts.
if (expectNumRestarts == 0 && observedRestarts > 0) || (expectNumRestarts > 0 &&
int(observedRestarts) < expectNumRestarts) {
framework.Failf("pod %s/%s - expected number of restarts: %d, found restarts: %d",
ns, pod.Name, expectNumRestarts, observedRestarts)
}
}
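// Illustrative usage sketch (editor's addition, not part of the vendored
// suite): webserverProbeBuilder is copied by value, so each chained call
// below yields an independent probe without mutating the base builder.
func exampleProbeBuilderUsage() {
base := webserverProbeBuilder{}
ready := base.withInitialDelay().build() // HTTP GET :80/ with a 15s initial delay
failing := base.withFailing().build()    // HTTP GET :81/, fails against the test webserver
_, _ = ready, failing
}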

View file

@ -0,0 +1,87 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Docker Containers", func() {
f := framework.NewDefaultFramework("containers")
It("should use the image defaults if command and args are blank [Conformance]", func() {
f.TestContainerOutput("use defaults", entrypointTestPod(), 0, []string{
"[/ep default arguments]",
})
})
It("should be able to override the image's default arguments (docker cmd) [Conformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
f.TestContainerOutput("override arguments", pod, 0, []string{
"[/ep override arguments]",
})
})
// Note: when you override the entrypoint, the image's arguments (docker cmd)
// are ignored.
It("should be able to override the image's default commmand (docker entrypoint) [Conformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
f.TestContainerOutput("override command", pod, 0, []string{
"[/ep-2]",
})
})
It("should be able to override the image's default command and arguments [Conformance]", func() {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}
pod.Spec.Containers[0].Args = []string{"override", "arguments"}
f.TestContainerOutput("override all", pod, 0, []string{
"[/ep-2 override arguments]",
})
})
})
const testContainerName = "test-container"
// Return a prototypical entrypoint test pod
func entrypointTestPod() *v1.Pod {
podName := "client-containers-" + string(uuid.NewUUID())
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: testContainerName,
Image: "gcr.io/google_containers/eptest:0.1",
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
}
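// Illustrative sketch (editor's addition): the fields exercised above map
// onto Docker concepts as follows — Command overrides the image ENTRYPOINT
// and Args overrides the image CMD. A minimal combined override in the same
// style as entrypointTestPod:
func overriddenEntrypointPod() *v1.Pod {
pod := entrypointTestPod()
pod.Spec.Containers[0].Command = []string{"/ep-2"}              // replaces ENTRYPOINT
pod.Spec.Containers[0].Args = []string{"override", "arguments"} // replaces CMD
return pod
}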

View file

@ -0,0 +1,211 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Downward API", func() {
f := framework.NewDefaultFramework("downward-api")
It("should provide pod name and namespace as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
},
{
Name: "POD_NAMESPACE",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
},
},
}
expectations := []string{
fmt.Sprintf("POD_NAME=%v", podName),
fmt.Sprintf("POD_NAMESPACE=%v", f.Namespace.Name),
}
testDownwardAPI(f, podName, env, expectations)
})
It("should provide pod IP as an env var [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
},
},
}
expectations := []string{
"POD_IP=(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)\\.(?:\\d+)",
}
testDownwardAPI(f, podName, env, expectations)
})
It("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
{
Name: "CPU_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.cpu",
},
},
},
{
Name: "MEMORY_REQUEST",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.memory",
},
},
},
}
expectations := []string{
fmt.Sprintf("CPU_LIMIT=2"),
fmt.Sprintf("MEMORY_LIMIT=67108864"),
fmt.Sprintf("CPU_REQUEST=1"),
fmt.Sprintf("MEMORY_REQUEST=33554432"),
}
testDownwardAPI(f, podName, env, expectations)
})
It("should provide default limits.cpu/memory from node allocatable [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
}
expectations := []string{
fmt.Sprintf("CPU_LIMIT=[1-9]"),
fmt.Sprintf("MEMORY_LIMIT=[1-9]"),
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
})
})
func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("250m"),
v1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1250m"),
v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
Env: env,
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
}
func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}
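// Illustrative sketch (editor's addition, not part of the vendored suite):
// what a container sees for the env vars injected above. This is a
// hypothetical standalone program; the names match the test expectations.
package main

import (
"fmt"
"os"
)

func main() {
for _, key := range []string{"POD_NAME", "POD_NAMESPACE", "POD_IP",
"CPU_LIMIT", "MEMORY_LIMIT", "CPU_REQUEST", "MEMORY_REQUEST"} {
fmt.Printf("%s=%s\n", key, os.Getenv(key))
}
}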

View file

@ -0,0 +1,417 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Downward API volume", func() {
// How long to wait for a log pod to be displayed
const podLogTimeout = 2 * time.Minute
f := framework.NewDefaultFramework("downward-api")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should provide podname only [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should set DefaultMode on files [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
defaultMode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", nil, &defaultMode)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should set mode on item file [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
mode := int32(0400)
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--------",
})
})
It("should provide podname as non-root with fsgroup [Feature:FSGroup] [Volume]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("%s\n", podName),
})
})
It("should provide podname as non-root with fsgroup and defaultMode [Feature:FSGroup] [Volume]", func() {
podName := "metadata-volume-" + string(uuid.NewUUID())
uid := int64(1001)
gid := int64(1234)
mode := int32(0440) /* setting fsGroup sets mode to at least 440 */
pod := downwardAPIVolumePodForModeTest(podName, "/etc/podname", &mode, nil)
pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
"mode of file \"/etc/podname\": -r--r-----",
})
})
It("should update labels on modification [Conformance] [Volume]", func() {
labels := map[string]string{}
labels["key1"] = "value1"
labels["key2"] = "value2"
podName := "labelsupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, labels, map[string]string{}, "/etc/labels")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
// modify labels
podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("key3=\"value3\"\n"))
})
It("should update annotations on modification [Conformance] [Volume]", func() {
annotations := map[string]string{}
annotations["builder"] = "bar"
podName := "annotationupdate" + string(uuid.NewUUID())
pod := downwardAPIVolumePodForUpdateTest(podName, map[string]string{}, annotations, "/etc/annotations")
containerName := "client-container"
By("Creating the pod")
podClient.CreateSync(pod)
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
// modify annotations
podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
Eventually(func() (string, error) {
return framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName)
},
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"foo\"\n"))
})
It("should provide container's cpu limit [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("2\n"),
})
})
It("should provide container's memory limit [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("67108864\n"),
})
})
It("should provide container's cpu request [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/cpu_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("1\n"),
})
})
It("should provide container's memory request [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForContainerResources(podName, "/etc/memory_request")
f.TestContainerOutput("downward API volume plugin", pod, 0, []string{
fmt.Sprintf("33554432\n"),
})
})
It("should provide node allocatable (cpu) as default cpu limit if the limit is not set [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/cpu_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
It("should provide node allocatable (memory) as default memory limit if the limit is not set [Conformance] [Volume]", func() {
podName := "downwardapi-volume-" + string(uuid.NewUUID())
pod := downwardAPIVolumeForDefaultContainerResources(podName, "/etc/memory_limit")
f.TestContainerOutputRegexp("downward API volume plugin", pod, 0, []string{"[1-9]"})
})
})
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_mode=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
if itemMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.Items[0].Mode = itemMode
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.DownwardAPI.DefaultMode = defaultMode
}
return pod
}
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
return pod
}
func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
return pod
}
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
return pod
}
func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("250m"),
v1.ResourceMemory: resource.MustParse("32Mi"),
},
Limits: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("1250m"),
v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
}
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
return []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
},
},
},
}
}
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, labels, annotations)
pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
ReadOnly: false,
},
},
},
}
applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations, pod)
return pod
}
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "podinfo",
VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
Items: []v1.DownwardAPIVolumeFile{
{
Path: "podname",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
{
Path: "cpu_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.cpu",
},
},
{
Path: "cpu_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.cpu",
},
},
{
Path: "memory_limit",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.memory",
},
},
{
Path: "memory_request",
ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.memory",
},
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
if len(labels) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "labels",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
})
}
if len(annotations) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "annotations",
FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations",
},
})
}
}
// TODO: add test-webserver example as pointed out in https://github.com/kubernetes/kubernetes/pull/5093#discussion-diff-37606771
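// Illustrative sketch (editor's addition, not part of the vendored suite):
// the kubelet serializes metadata.labels and metadata.annotations into the
// mounted files as one key="value" pair per line, which is why the update
// tests above match substrings such as key3="value3" followed by a newline.
package main

import (
"fmt"
"io/ioutil"
)

func main() {
data, err := ioutil.ReadFile("/etc/labels") // path used by the update tests
if err != nil {
fmt.Println(err)
return
}
fmt.Print(string(data)) // e.g. key1="value1"\nkey2="value2"\n
}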

352
vendor/k8s.io/kubernetes/test/e2e/common/empty_dir.go generated vendored Normal file
View file

@ -0,0 +1,352 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"path"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const (
testImageRootUid = "gcr.io/google_containers/mounttest:0.7"
testImageNonRootUid = "gcr.io/google_containers/mounttest-user:0.3"
)
var _ = framework.KubeDescribe("EmptyDir volumes", func() {
f := framework.NewDefaultFramework("emptydir")
Context("when FSGroup is specified [Feature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root [Volume]", func() {
doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("new files should be created with FSGroup ownership when container is non-root [Volume]", func() {
doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("files with FSGroup ownership should support (root,0644,tmpfs) [Volume]", func() {
doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode using FSGroup [Volume]", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
})
It("volume on tmpfs should have the correct mode using FSGroup [Volume]", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
})
It("volume on tmpfs should have the correct mode [Conformance] [Volume]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0644,tmpfs) [Conformance] [Volume]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0666,tmpfs) [Conformance] [Volume]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0777,tmpfs) [Conformance] [Volume]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0644,tmpfs) [Conformance] [Volume]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0666,tmpfs) [Conformance] [Volume]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0777,tmpfs) [Conformance] [Volume]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode [Conformance] [Volume]", func() {
doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0644,default) [Conformance] [Volume]", func() {
doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0666,default) [Conformance] [Volume]", func() {
doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0777,default) [Conformance] [Volume]", func() {
doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0644,default) [Conformance] [Volume]", func() {
doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0666,default) [Conformance] [Volume]", func() {
doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0777,default) [Conformance] [Volume]", func() {
doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
})
})
const (
containerName = "test-container"
volumeName = "test-volume"
)
func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0660=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
fmt.Sprintf("--file_owner=%v", filePath),
}
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-rw----",
"content of file \"/test-volume/test-file\": mount-tester new file",
"owner GID of \"/test-volume/test-file\": 123",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
}
fsGroup := int64(1001)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
fsGroup := int64(123)
pod.Spec.SecurityContext.FSGroup = &fsGroup
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_perm=%v", volumePath),
}
msg := fmt.Sprintf("emptydir volume type on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0644 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0666=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0666 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rw-rw-rw-",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--new_file_0777=%v", filePath),
fmt.Sprintf("--file_perm=%v", filePath),
}
msg := fmt.Sprintf("emptydir 0777 on %v", formatMedium(medium))
out := []string{
"perms of file \"/test-volume/test-file\": -rwxrwxrwx",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
func formatMedium(medium v1.StorageMedium) string {
if medium == v1.StorageMediumMemory {
return "tmpfs"
}
return "node default medium"
}
func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
podName := "pod-" + string(uuid.NewUUID())
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: image,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
EmptyDir: source,
},
},
},
},
}
}
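// Illustrative sketch (editor's addition): how the medium parameter used
// throughout these helpers selects the emptyDir backing store.
func emptyDirSource(useTmpfs bool) *v1.EmptyDirVolumeSource {
if useTmpfs {
// Backed by tmpfs; shows up as `mount type of "/test-volume": tmpfs`.
return &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}
}
// Node default medium, usually the node's local disk.
return &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumDefault}
}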

132
vendor/k8s.io/kubernetes/test/e2e/common/expansion.go generated vendored Normal file
View file

@ -0,0 +1,132 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// These tests exercise the Kubernetes expansion syntax $(VAR).
// For more information, see: docs/design/expansion.md
var _ = framework.KubeDescribe("Variable Expansion", func() {
f := framework.NewDefaultFramework("var-expansion")
It("should allow composing env vars into new env vars [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "FOO",
Value: "foo-value",
},
{
Name: "BAR",
Value: "bar-value",
},
{
Name: "FOOBAR",
Value: "$(FOO);;$(BAR)",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("env composition", pod, 0, []string{
"FOO=foo-value",
"BAR=bar-value",
"FOOBAR=foo-value;;bar-value",
})
})
It("should allow substituting values in a container's command [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("substitution in container's command", pod, 0, []string{
"test-value",
})
})
It("should allow substituting values in a container's args [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("substitution in container's args", pod, 0, []string{
"test-value",
})
})
})
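// Illustrative sketch (editor's addition, not the kubelet's actual
// implementation): a minimal expander for the $(VAR) syntax exercised
// above, ignoring the escaping rules described in docs/design/expansion.md.
package main

import (
"fmt"
"regexp"
)

var varRef = regexp.MustCompile(`\$\(([A-Za-z_][A-Za-z0-9_]*)\)`)

func expand(s string, env map[string]string) string {
return varRef.ReplaceAllStringFunc(s, func(m string) string {
name := varRef.FindStringSubmatch(m)[1]
if v, ok := env[name]; ok {
return v
}
return m // unknown references are left verbatim
})
}

func main() {
env := map[string]string{"FOO": "foo-value", "BAR": "bar-value"}
fmt.Println(expand("$(FOO);;$(BAR)", env)) // foo-value;;bar-value
}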

178
vendor/k8s.io/kubernetes/test/e2e/common/host_path.go generated vendored Normal file
View file

@ -0,0 +1,178 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"path"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
// TODO: Consolidate this code with the code for emptyDir.
// This will require some smarts.
var _ = framework.KubeDescribe("HostPath", func() {
f := framework.NewDefaultFramework("hostpath")
BeforeEach(func() {
// Clean up before running the test.
_ = os.Remove("/tmp/test-file")
})
It("should give a volume the correct mode [Conformance] [Volume]", func() {
volumePath := "/test-volume"
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--fs_type=%v", volumePath),
fmt.Sprintf("--file_mode=%v", volumePath),
}
f.TestContainerOutput("hostPath mode", pod, 0, []string{
"mode of file \"/test-volume\": dtrwxrwxrwx", // we expect the sticky bit (mode flag t) to be set for the dir
})
})
// This test requires mounting a folder into a container with write privileges.
It("should support r/w [Volume]", func() {
volumePath := "/test-volume"
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
pod.Spec.Containers[0].Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePath),
fmt.Sprintf("--file_mode=%v", filePath),
}
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePath),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
//Read the content of the file with the second container to
//verify volumes being shared properly among containers within the pod.
f.TestContainerOutput("hostPath r/w", pod, 1, []string{
"content of file \"/test-volume/test-file\": mount-tester new file",
})
})
It("should support subPath [Volume]", func() {
volumePath := "/test-volume"
subPath := "sub-path"
fileName := "test-file"
retryDuration := 180
filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName)
source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
// Write the file in the subPath from container 0
container := &pod.Spec.Containers[0]
container.VolumeMounts[0].SubPath = subPath
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", filePathInWriter),
fmt.Sprintf("--file_mode=%v", filePathInWriter),
}
// Read it from outside the subPath from container 1
pod.Spec.Containers[1].Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", filePathInReader),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
f.TestContainerOutput("hostPath subPath", pod, 1, []string{
"content of file \"" + filePathInReader + "\": mount-tester new file",
})
})
})
//These constants are borrowed from the other test.
//const volumeName = "test-volume"
const containerName1 = "test-container-1"
const containerName2 = "test-container-2"
func mount(source *v1.HostPathVolumeSource) []v1.Volume {
return []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: source,
},
},
}
}
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
podName := "pod-host-path-test"
privileged := true
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName1,
Image: "gcr.io/google_containers/mounttest:0.7",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
{
Name: containerName2,
Image: "gcr.io/google_containers/mounttest:0.7",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: mount(source),
},
}
}
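// Illustrative sketch (editor's addition): why the writer and reader above
// address the same file through different paths. The writer mounts the
// volume with SubPath "sub-path", so its /test-volume is the host's
// /tmp/sub-path; the reader mounts /tmp directly and looks one level deeper.
package main

import (
"fmt"
"path"
)

func main() {
volumePath, subPath, fileName := "/test-volume", "sub-path", "test-file"
fmt.Println(path.Join(volumePath, fileName))          // writer's view
fmt.Println(path.Join(volumePath, subPath, fileName)) // reader's view
}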

View file

@ -0,0 +1,408 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"strconv"
"time"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("InitContainer", func() {
f := framework.NewDefaultFramework("init-container")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should invoke init containers on a RestartNever pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})
It("should invoke init containers on a RestartAlways pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionTrue))
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
Expect(status.Ready).To(BeTrue())
Expect(status.State.Terminated).NotTo(BeNil())
Expect(status.State.Terminated.ExitCode).To(BeZero())
}
})
It("should not start app containers if init containers fail on a RestartAlways pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/false"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
// check for the first container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
}
}
if len(t.Status.InitContainerStatuses) != 2 {
return false, nil
}
status := t.Status.InitContainerStatuses[1]
if status.State.Waiting == nil {
return false, fmt.Errorf("second init container should not be out of waiting: %#v", status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("second init container should have reason PodInitializing: %#v", status)
}
status = t.Status.InitContainerStatuses[0]
if status.State.Terminated != nil && status.State.Terminated.ExitCode == 0 {
return false, fmt.Errorf("first init container should have exitCode != 0: %#v", status)
}
// continue until we see an attempt to restart the pod
return status.LastTerminationState.Terminated != nil, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
// verify we get at least three restarts
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
status := t.Status.InitContainerStatuses[0]
if status.RestartCount < 3 {
return false, nil
}
framework.Logf("init container has failed twice: %#v", t)
// TODO: more conditions
return true, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
})
It("should not start app containers and fail the pod if init containers fail on a RestartNever pod", func() {
framework.SkipIfContainerRuntimeIs("rkt") // #25988
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyNever,
InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
},
{
Name: "init2",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/false"},
},
},
Containers: []v1.Container{
{
Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
Resources: v1.ResourceRequirements{
Limits: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod)
w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
event, err := watch.Until(
framework.PodStartTimeout, wr,
// check for the second init container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
}
if status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("container %q should have reason PodInitializing: %#v", status.Name, status)
}
}
if len(t.Status.InitContainerStatuses) != 2 {
return false, nil
}
status := t.Status.InitContainerStatuses[0]
if status.State.Terminated == nil {
if status.State.Waiting != nil && status.State.Waiting.Reason != "PodInitializing" {
return false, fmt.Errorf("first init container should have reason PodInitializing: %#v", status)
}
return false, nil
}
if status.State.Terminated.ExitCode != 0 {
return false, fmt.Errorf("first init container should have exited with exitCode 0: %#v", status)
}
status = t.Status.InitContainerStatuses[1]
if status.State.Terminated == nil {
return false, nil
}
if status.State.Terminated.ExitCode == 0 {
return false, fmt.Errorf("second init container should have failed: %#v", status)
}
return true, nil
default:
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
conditions.PodCompleted,
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
_, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
Expect(endPod.Status.ContainerStatuses[0].State.Waiting).ToNot(BeNil())
})
})

218
vendor/k8s.io/kubernetes/test/e2e/common/kubelet_etc_hosts.go generated vendored Normal file
View file

@ -0,0 +1,218 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"strings"
"time"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
etcHostsImageName = "gcr.io/google_containers/netexec:1.7"
etcHostsPodName = "test-pod"
etcHostsHostNetworkPodName = "test-host-network-pod"
etcHostsPartialContent = "# Kubernetes-managed hosts file."
)
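// KubeletManagedHostConfig tracks the framework handle and the two test pods
// (hostNetwork=false and hostNetwork=true) shared by the /etc/hosts checks below.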
type KubeletManagedHostConfig struct {
hostNetworkPod *v1.Pod
pod *v1.Pod
f *framework.Framework
}
var _ = framework.KubeDescribe("KubeletManagedEtcHosts", func() {
f := framework.NewDefaultFramework("e2e-kubelet-etc-hosts")
config := &KubeletManagedHostConfig{
f: f,
}
It("should test kubelet managed /etc/hosts file [Conformance]", func() {
By("Setting up the test")
config.setup()
By("Running the test")
config.verifyEtcHosts()
})
})
func (config *KubeletManagedHostConfig) verifyEtcHosts() {
By("Verifying /etc/hosts of container is kubelet-managed for pod with hostNetwork=false")
assertManagedStatus(config, etcHostsPodName, true, "busybox-1")
assertManagedStatus(config, etcHostsPodName, true, "busybox-2")
By("Verifying /etc/hosts of container is not kubelet-managed since container specifies /etc/hosts mount")
assertManagedStatus(config, etcHostsPodName, false, "busybox-3")
By("Verifying /etc/hosts content of container is not kubelet-managed for pod with hostNetwork=true")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-1")
assertManagedStatus(config, etcHostsHostNetworkPodName, false, "busybox-2")
}
func (config *KubeletManagedHostConfig) setup() {
By("Creating hostNetwork=false pod")
config.createPodWithoutHostNetwork()
By("Creating hostNetwork=true pod")
config.createPodWithHostNetwork()
}
func (config *KubeletManagedHostConfig) createPodWithoutHostNetwork() {
podSpec := config.createPodSpec(etcHostsPodName)
config.pod = config.f.PodClient().CreateSync(podSpec)
}
func (config *KubeletManagedHostConfig) createPodWithHostNetwork() {
podSpec := config.createPodSpecWithHostNetwork(etcHostsHostNetworkPodName)
config.hostNetworkPod = config.f.PodClient().CreateSync(podSpec)
}
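// assertManagedStatus polls /etc/hosts in the named container until the presence of the
// kubelet-managed marker matches the expectation, failing the test if the timeout expires first.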
func assertManagedStatus(
config *KubeletManagedHostConfig, podName string, expectedIsManaged bool, name string) {
// TODO: workaround for https://github.com/kubernetes/kubernetes/issues/34256
//
// Retry until timeout for the contents of /etc/hosts to show
// up. Note: if /etc/hosts is properly mounted, then this will
// succeed immediately.
const retryTimeout = 30 * time.Second
retryCount := 0
etcHostsContent := ""
for startTime := time.Now(); time.Since(startTime) < retryTimeout; {
etcHostsContent = config.getEtcHostsContent(podName, name)
isManaged := strings.Contains(etcHostsContent, etcHostsPartialContent)
if expectedIsManaged == isManaged {
return
}
glog.Warningf(
"For pod: %s, name: %s, expected %t, actual %t (/etc/hosts was %q), retryCount: %d",
podName, name, expectedIsManaged, isManaged, etcHostsContent, retryCount)
retryCount++
time.Sleep(100 * time.Millisecond)
}
if expectedIsManaged {
framework.Failf(
"/etc/hosts file should be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
} else {
framework.Failf(
"/etc/hosts file should no be kubelet managed (name: %s, retries: %d). /etc/hosts contains %q",
name, retryCount, etcHostsContent)
}
}
func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerName string) string {
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
}
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-2",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-3",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: "/etc/hosts",
},
},
},
},
Volumes: []v1.Volume{
{
Name: "host-etc-hosts",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts",
},
},
},
},
},
}
return pod
}
func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
HostNetwork: true,
SecurityContext: &v1.PodSecurityContext{},
Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
{
Name: "busybox-2",
Image: etcHostsImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
},
},
},
}
return pod
}

61
vendor/k8s.io/kubernetes/test/e2e/common/networking.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
. "github.com/onsi/ginkgo"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
)
var _ = framework.KubeDescribe("Networking", func() {
f := framework.NewDefaultFramework("pod-network-test")
framework.KubeDescribe("Granular Checks: Pods", func() {
// Try to hit all endpoints through a test container, retry 5 times,
// expect exactly one unique hostname. Each of these endpoints reports
// its own hostname.
It("should function for intra-pod communication: http [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
It("should function for intra-pod communication: udp [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromTestContainer("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
It("should function for node-pod communication: http [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromNode("http", endpointPod.Status.PodIP, framework.EndpointHttpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
It("should function for node-pod communication: udp [Conformance]", func() {
config := framework.NewCoreNetworkingTestConfig(f)
for _, endpointPod := range config.EndpointPods {
config.DialFromNode("udp", endpointPod.Status.PodIP, framework.EndpointUdpPort, config.MaxTries, 0, sets.NewString(endpointPod.Name))
}
})
})
})

661
vendor/k8s.io/kubernetes/test/e2e/common/pods.go generated vendored Normal file
View file

@ -0,0 +1,661 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"bytes"
"fmt"
"io"
"strconv"
"strings"
"time"
"golang.org/x/net/websocket"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
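// Back-off test tuning: buildBackOffDuration is how long restarts are allowed to accumulate,
// syncLoopFrequency approximates the kubelet sync period, and maxBackOffTolerance pads
// kubelet.MaxContainerBackOff by 30% to absorb sync-loop jitter in the observed delays.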
var (
buildBackOffDuration = time.Minute
syncLoopFrequency = 10 * time.Second
maxBackOffTolerance = time.Duration(1.3 * float64(kubelet.MaxContainerBackOff))
)
// testHostIP tests that a pod gets a host IP
func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
By("creating pod")
podClient.CreateSync(pod)
// Try to make sure we get a hostIP for each pod.
hostIPTimeout := 2 * time.Minute
t := time.Now()
for {
p, err := podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "Failed to get pod %q", pod.Name)
if p.Status.HostIP != "" {
framework.Logf("Pod %s has hostIP: %s", p.Name, p.Status.HostIP)
break
}
if time.Since(t) >= hostIPTimeout {
framework.Failf("Gave up waiting for hostIP of pod %s after %v seconds",
p.Name, time.Since(t).Seconds())
}
framework.Logf("Retrying to get the hostIP of pod %s", p.Name)
time.Sleep(5 * time.Second)
}
}
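// startPodAndGetBackOffs creates the pod, sleeps for sleepAmount so restarts can accumulate,
// then samples three consecutive restart delays, discarding the first as a warm-up and
// returning the next two for comparison.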
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod)
time.Sleep(sleepAmount)
Expect(pod.Spec.Containers).NotTo(BeEmpty())
podName := pod.Name
containerName := pod.Spec.Containers[0].Name
By("getting restart delay-0")
_, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
By("getting restart delay-1")
delay1, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
By("getting restart delay-2")
delay2, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
return delay1, delay2
}
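// getRestartDelay polls the container status until a fresh restart is observed and returns
// the gap between the previous termination and the new start time, i.e. the kubelet's
// current back-off delay for the container.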
func getRestartDelay(podClient *framework.PodClient, podName string, containerName string) (time.Duration, error) {
beginTime := time.Now()
for time.Since(beginTime) < (2 * maxBackOffTolerance) { // may just miss the 1st MaxContainerBackOff delay
time.Sleep(time.Second)
pod, err := podClient.Get(podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
framework.Logf("getRestartDelay: status missing")
continue
}
if status.State.Waiting == nil && status.State.Running != nil && status.LastTerminationState.Terminated != nil && status.State.Running.StartedAt.Time.After(beginTime) {
startedAt := status.State.Running.StartedAt.Time
finishedAt := status.LastTerminationState.Terminated.FinishedAt.Time
framework.Logf("getRestartDelay: restartCount = %d, finishedAt=%s restartedAt=%s (%s)", status.RestartCount, finishedAt, startedAt, startedAt.Sub(finishedAt))
return startedAt.Sub(finishedAt), nil
}
}
return 0, fmt.Errorf("timeout getting pod restart delay")
}
var _ = framework.KubeDescribe("Pods", func() {
f := framework.NewDefaultFramework("pods")
var podClient *framework.PodClient
BeforeEach(func() {
podClient = f.PodClient()
})
It("should get a host IP [Conformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test",
Image: framework.GetPauseImageName(f.ClientSet),
},
},
},
})
})
It("should be submitted and removed [Conformance]", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
},
},
},
}
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
options = v1.ListOptions{
LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
Expect(err).NotTo(HaveOccurred(), "failed to set up watch")
By("submitting the pod to kubernetes")
podClient.Create(pod)
By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("verifying pod creation was observed")
select {
case event := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe pod creation: %v", event)
}
case <-time.After(framework.PodStartTimeout):
Fail("Timeout while waiting for pod creation")
}
// We need to wait for the pod to be running, otherwise the deletion
// may be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
// save the running pod
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to GET scheduled pod")
framework.Logf("running pod: %#v", pod)
By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, v1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
By("verifying the kubelet observed the termination notice")
Expect(wait.Poll(time.Second*5, time.Second*30, func() (bool, error) {
podList, err := framework.GetKubeletPods(f.ClientSet, pod.Spec.NodeName)
if err != nil {
framework.Logf("Unable to retrieve kubelet pods for node %v: %v", pod.Spec.NodeName, err)
return false, nil
}
for _, kubeletPod := range podList.Items {
if pod.Name != kubeletPod.Name {
continue
}
if kubeletPod.ObjectMeta.DeletionTimestamp == nil {
framework.Logf("deletion has not yet been observed")
return false, nil
}
return true, nil
}
framework.Logf("no pod exists with the name we were looking for, assuming the termination request was observed and completed")
return true, nil
})).NotTo(HaveOccurred(), "kubelet never observed the termination notice")
By("verifying pod deletion was observed")
deleted := false
timeout := false
var lastPod *v1.Pod
timer := time.After(30 * time.Second)
for !deleted && !timeout {
select {
case event := <-w.ResultChan():
if event.Type == watch.Deleted {
lastPod = event.Object.(*v1.Pod)
deleted = true
}
case <-timer:
timeout = true
}
}
if !deleted {
Fail("Failed to observe pod deletion")
}
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
})
It("should be updated [Conformance]", func() {
By("creating the pod")
name := "pod-update-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
},
},
},
}
By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) {
value = strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value
})
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
framework.Logf("Pod update OK")
})
It("should allow activeDeadlineSeconds to be updated [Conformance]", func() {
By("creating the pod")
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
},
},
},
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("updating the pod")
podClient.Update(name, func(pod *v1.Pod) {
newDeadline := int64(5)
pod.Spec.ActiveDeadlineSeconds = &newDeadline
})
framework.ExpectNoError(f.WaitForPodTerminated(pod.Name, "DeadlineExceeded"))
})
It("should contain environment variables for services [Conformance]", func() {
// Make a pod that will be a service.
// This pod serves its hostname via HTTP.
serverName := "server-envvars-" + string(uuid.NewUUID())
serverPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: serverName,
Labels: map[string]string{"name": serverName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "srv",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
}
podClient.CreateSync(serverPod)
// This service exposes port 8080 of the test pod as a service on port 8765
// TODO(filbranden): We would like to use a unique service name such as:
// svcName := "svc-envvars-" + randomSuffix()
// However, that affects the name of the environment variables which are the capitalized
// service name, so that breaks this test. One possibility is to tweak the variable names
// to match the service. Another is to rethink environment variable names and possibly
// allow overriding the prefix in the service manifest.
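// For illustration, a service named "fooservice" exposing port 8765 yields variables such as
// FOOSERVICE_SERVICE_HOST=10.0.0.1 and FOOSERVICE_PORT_8765_TCP=tcp://10.0.0.1:8765
// (addresses shown are hypothetical); the full set is asserted via expectedVars below.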
svcName := "fooservice"
svc := &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: svcName,
Labels: map[string]string{
"name": svcName,
},
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 8765,
TargetPort: intstr.FromInt(8080),
}},
Selector: map[string]string{
"name": serverName,
},
},
}
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(svc)
Expect(err).NotTo(HaveOccurred(), "failed to create service")
// Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID())
const containerName = "env3cont"
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
// It's possible for the Pod to be created before the Kubelet is updated with the new
// service. In that case, we just retry.
const maxRetries = 3
expectedVars := []string{
"FOOSERVICE_SERVICE_HOST=",
"FOOSERVICE_SERVICE_PORT=",
"FOOSERVICE_PORT=",
"FOOSERVICE_PORT_8765_TCP_PORT=",
"FOOSERVICE_PORT_8765_TCP_PROTO=",
"FOOSERVICE_PORT_8765_TCP=",
"FOOSERVICE_PORT_8765_TCP_ADDR=",
}
framework.ExpectNoErrorWithRetries(func() error {
return f.MatchContainerOutput(pod, containerName, expectedVars, ContainSubstring)
}, maxRetries, "Container should have service environment variables set")
})
It("should support remote command execution over websockets", func() {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "main",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 600"},
},
},
},
}
By("submitting the pod to kubernetes")
pod = podClient.CreateSync(pod)
req := f.ClientSet.Core().RESTClient().Get().
Namespace(f.Namespace.Name).
Resource("pods").
Name(pod.Name).
Suffix("exec").
Param("stderr", "1").
Param("stdout", "1").
Param("container", pod.Spec.Containers[0].Name).
Param("command", "cat").
Param("command", "/etc/resolv.conf")
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"channel.k8s.io"})
if err != nil {
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
}
defer ws.Close()
buf := &bytes.Buffer{}
Eventually(func() error {
for {
var msg []byte
if err := websocket.Message.Receive(ws, &msg); err != nil {
if err == io.EOF {
break
}
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
}
if len(msg) == 0 {
continue
}
if msg[0] != 1 {
framework.Failf("Got message from server that didn't start with channel 1 (STDOUT): %v", msg)
}
buf.Write(msg[1:])
}
if buf.Len() == 0 {
return fmt.Errorf("Unexpected output from server")
}
if !strings.Contains(buf.String(), "nameserver") {
return fmt.Errorf("Expected to find 'nameserver' in %q", buf.String())
}
return nil
}, time.Minute, 10*time.Second).Should(BeNil())
})
It("should support retrieving logs from the container over websockets", func() {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred(), "unable to get base config")
By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "main",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo container is alive; sleep 10000"},
},
},
},
}
By("submitting the pod to kubernetes")
podClient.CreateSync(pod)
req := f.ClientSet.Core().RESTClient().Get().
Namespace(f.Namespace.Name).
Resource("pods").
Name(pod.Name).
Suffix("log").
Param("container", pod.Spec.Containers[0].Name)
url := req.URL()
ws, err := framework.OpenWebSocketForURL(url, config, []string{"binary.k8s.io"})
if err != nil {
framework.Failf("Failed to open websocket to %s: %v", url.String(), err)
}
defer ws.Close()
buf := &bytes.Buffer{}
for {
var msg []byte
if err := websocket.Message.Receive(ws, &msg); err != nil {
if err == io.EOF {
break
}
framework.Failf("Failed to read completely from websocket %s: %v", url.String(), err)
}
if len(strings.TrimSpace(string(msg))) == 0 {
continue
}
buf.Write(msg)
}
if buf.String() != "container is alive\n" {
framework.Failf("Unexpected websocket logs:\n%s", buf.String())
}
})
It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
podName := "pod-back-off-image"
containerName := "back-off"
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "back-off-image"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
},
}
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
By("updating the image")
podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.7"
})
time.Sleep(syncLoopFrequency)
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("get restart delay after image update")
delayAfterUpdate, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delayAfterUpdate > 2*delay2 || delayAfterUpdate > 2*delay1 {
framework.Failf("updating image did not reset the back-off value in pod=%s/%s d3=%s d2=%s d1=%s", podName, containerName, delayAfterUpdate, delay1, delay2)
}
})
// Slow issue #19027 (20 mins)
It("should cap back-off at MaxContainerBackOff [Slow]", func() {
podName := "back-off-cap"
containerName := "back-off-cap"
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "liveness"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "sleep 5", "/crash/missing"},
},
},
},
}
podClient.CreateSync(pod)
time.Sleep(2 * kubelet.MaxContainerBackOff) // it takes slightly more than 2*x to get to a back-off of x
// wait for a delay == capped delay of MaxContainerBackOff
By("geting restart delay when capped")
var (
delay1 time.Duration
err error
)
for i := 0; i < 3; i++ {
delay1, err = getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delay1 >= kubelet.MaxContainerBackOff {
break
}
}
if (delay1 < kubelet.MaxContainerBackOff) || (delay1 > maxBackOffTolerance) {
framework.Failf("expected %s back-off got=%s in delay1", kubelet.MaxContainerBackOff, delay1)
}
By("getting restart delay after a capped delay")
delay2, err := getRestartDelay(podClient, podName, containerName)
if err != nil {
framework.Failf("timed out waiting for container restart in pod=%s/%s", podName, containerName)
}
if delay2 < kubelet.MaxContainerBackOff || delay2 > maxBackOffTolerance { // syncloop cumulative drift
framework.Failf("expected %s back-off got=%s on delay2", kubelet.MaxContainerBackOff, delay2)
}
})
})

114
vendor/k8s.io/kubernetes/test/e2e/common/privileged.go generated vendored Normal file
View file

@ -0,0 +1,114 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
type PrivilegedPodTestConfig struct {
f *framework.Framework
privilegedPod string
privilegedContainer string
notPrivilegedContainer string
pod *v1.Pod
}
var _ = framework.KubeDescribe("PrivilegedPod", func() {
config := &PrivilegedPodTestConfig{
f: framework.NewDefaultFramework("e2e-privileged-pod"),
privilegedPod: "privileged-pod",
privilegedContainer: "privileged-container",
notPrivilegedContainer: "not-privileged-container",
}
It("should enable privileged commands", func() {
By("Creating a pod with a privileged container")
config.createPods()
By("Executing in the privileged container")
config.run(config.privilegedContainer, true)
By("Executing in the non-privileged container")
config.run(config.notPrivilegedContainer, false)
})
})
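// run executes `ip link add` inside the given container and asserts the outcome matches
// expectSuccess; on success it also deletes the dummy link so it does not leak onto the node.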
func (c *PrivilegedPodTestConfig) run(containerName string, expectSuccess bool) {
cmd := []string{"ip", "link", "add", "dummy1", "type", "dummy"}
reverseCmd := []string{"ip", "link", "del", "dummy1"}
stdout, stderr, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, cmd...)
msg := fmt.Sprintf("cmd %v, stdout %q, stderr %q", cmd, stdout, stderr)
if expectSuccess {
Expect(err).NotTo(HaveOccurred(), msg)
// We need to clean up the dummy link that was created, as it
// leaks out into the node level -- yuck.
_, _, err := c.f.ExecCommandInContainerWithFullOutput(
c.privilegedPod, containerName, reverseCmd...)
Expect(err).NotTo(HaveOccurred(),
fmt.Sprintf("could not remove dummy1 link: %v", err))
} else {
Expect(err).To(HaveOccurred(), msg)
}
}
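// createPodsSpec returns a pod with two long-sleeping busybox containers, one privileged
// and one not, so commands can be exec'd into each.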
func (c *PrivilegedPodTestConfig) createPodsSpec() *v1.Pod {
isPrivileged := true
notPrivileged := false
const image = "gcr.io/google_containers/busybox:1.24"
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: c.privilegedPod,
Namespace: c.f.Namespace.Name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: c.privilegedContainer,
Image: image,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
{
Name: c.notPrivilegedContainer,
Image: image,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{"/bin/sleep", "10000"},
},
},
},
}
}
func (c *PrivilegedPodTestConfig) createPods() {
podSpec := c.createPodsSpec()
c.pod = c.f.PodClient().CreateSync(podSpec)
}

350
vendor/k8s.io/kubernetes/test/e2e/common/secrets.go generated vendored Normal file
View file

@ -0,0 +1,350 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
"os"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Secrets", func() {
f := framework.NewDefaultFramework("secrets")
It("should be consumable from pods in volume [Conformance] [Volume]", func() {
doSecretE2EWithoutMapping(f, nil /* default mode */, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
It("should be consumable from pods in volume with defaultMode set [Conformance] [Volume]", func() {
defaultMode := int32(0400)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), nil, nil)
})
It("should be consumable from pods in volume as non-root with defaultMode and fsGroup set [Conformance] [Volume]", func() {
defaultMode := int32(0440) /* setting fsGroup sets mode to at least 440 */
fsGroup := int64(1001)
uid := int64(1000)
doSecretE2EWithoutMapping(f, &defaultMode, "secret-test-"+string(uuid.NewUUID()), &fsGroup, &uid)
})
It("should be consumable from pods in volume with mappings [Conformance] [Volume]", func() {
doSecretE2EWithMapping(f, nil)
})
It("should be consumable from pods in volume with mappings and Item Mode set [Conformance] [Volume]", func() {
mode := int32(0400)
doSecretE2EWithMapping(f, &mode)
})
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace [Volume]", func() {
var (
namespace2 *v1.Namespace
err error
secret2Name = "secret-test-" + string(uuid.NewUUID())
)
if namespace2, err = f.CreateNamespace("secret-namespace", nil); err != nil {
framework.Failf("unable to create new namespace %s: %v", namespace2.Name, err)
}
secret2 := secretForTest(namespace2.Name, secret2Name)
secret2.Data = map[string][]byte{
"this_should_not_match_content_of_other_secret": []byte("similarly_this_should_not_match_content_of_other_secret\n"),
}
if secret2, err = f.ClientSet.Core().Secrets(namespace2.Name).Create(secret2); err != nil {
framework.Failf("unable to create test secret %s: %v", secret2.Name, err)
}
doSecretE2EWithoutMapping(f, nil /* default mode */, secret2.Name, nil, nil)
})
It("should be consumable in multiple volumes in a pod [Conformance] [Volume]", func() {
// This test ensures that the same secret can be mounted in multiple
// volumes in the same pod. This test case exists to prevent
// regressions that break this use-case.
var (
name = "secret-test-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
volumeName2 = "secret-volume-2"
volumeMountPath2 = "/etc/secret-volume-2"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: volumeName2,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: volumeName2,
MountPath: volumeMountPath2,
ReadOnly: true,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": -rw-r--r--",
})
})
It("should be consumable from pods in env vars [Conformance]", func() {
name := "secret-test-" + string(uuid.NewUUID())
secret := secretForTest(f.Namespace.Name, name)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "secret-env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
Env: []v1.EnvVar{
{
Name: "SECRET_DATA",
ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
},
},
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
f.TestContainerOutput("consume secrets", pod, 0, []string{
"SECRET_DATA=value-1",
})
})
})
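// secretForTest returns a secret with three keys (data-1..data-3) used by the volume
// and env-var cases above.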
func secretForTest(namespace, name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
Data: map[string][]byte{
"data-1": []byte("value-1\n"),
"data-2": []byte("value-2\n"),
"data-3": []byte("value-3\n"),
},
}
}
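// doSecretE2EWithoutMapping mounts the named secret at its default paths and verifies the
// content and file mode of data-1, optionally running the pod with a non-root uid and fsGroup.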
func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secretName string, fsGroup *int64, uid *int64) {
var (
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
secret = secretForTest(f.Namespace.Name, secretName)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if defaultMode != nil {
pod.Spec.Volumes[0].VolumeSource.Secret.DefaultMode = defaultMode
} else {
mode := int32(0644)
defaultMode = &mode
}
if fsGroup != nil || uid != nil {
pod.Spec.SecurityContext = &v1.PodSecurityContext{
FSGroup: fsGroup,
RunAsUser: uid,
}
}
modeString := fmt.Sprintf("%v", os.FileMode(*defaultMode))
expectedOutput := []string{
"content of file \"/etc/secret-volume/data-1\": value-1",
"mode of file \"/etc/secret-volume/data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}
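// doSecretE2EWithMapping remaps key data-1 to new-path-data-1 inside the volume and verifies
// the mapped file's content and (optionally overridden) item mode.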
func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
var (
name = "secret-test-map-" + string(uuid.NewUUID())
volumeName = "secret-volume"
volumeMountPath = "/etc/secret-volume"
secret = secretForTest(f.Namespace.Name, name)
)
By(fmt.Sprintf("Creating secret with name %s", secret.Name))
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "new-path-data-1",
},
},
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
if mode != nil {
pod.Spec.Volumes[0].VolumeSource.Secret.Items[0].Mode = mode
} else {
defaultItemMode := int32(0644)
mode = &defaultItemMode
}
modeString := fmt.Sprintf("%v", os.FileMode(*mode))
expectedOutput := []string{
"content of file \"/etc/secret-volume/new-path-data-1\": value-1",
"mode of file \"/etc/secret-volume/new-path-data-1\": " + modeString,
}
f.TestContainerOutput("consume secrets", pod, 0, expectedOutput)
}

234
vendor/k8s.io/kubernetes/test/e2e/common/sysctl.go generated vendored Normal file
View file

@ -0,0 +1,234 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Sysctls", func() {
f := framework.NewDefaultFramework("sysctl")
var podClient *framework.PodClient
testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID())
pod := v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Annotations: map[string]string{},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24",
},
},
RestartPolicy: v1.RestartPolicyNever,
},
}
return &pod
}
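// waitForPodErrorEventOrStarted polls the pod's events until either a sysctl rejection
// (unsupported/forbidden) or a started-container event appears; only rejection events are
// returned to the caller.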
waitForPodErrorEventOrStarted := func(pod *v1.Pod) (*v1.Event, error) {
var ev *v1.Event
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := f.ClientSet.Core().Events(pod.Namespace).Search(pod)
if err != nil {
return false, fmt.Errorf("error in listing events: %s", err)
}
for _, e := range evnts.Items {
switch e.Reason {
case sysctl.UnsupportedReason, sysctl.ForbiddenReason:
ev = &e
return true, nil
case events.StartedContainer:
return true, nil
}
}
return false, nil
})
return ev, err
}
BeforeEach(func() {
podClient = f.PodClient()
})
It("should support sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
})
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := waitForPodErrorEventOrStarted(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod()
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
},
})
pod.Spec.Containers[0].Command = []string{"/bin/sysctl", "kernel.shm_rmid_forced"}
By("Creating a pod with the kernel.shm_rmid_forced sysctl")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := waitForPodErrorEventOrStarted(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
Expect(ev).To(BeNil())
By("Waiting for pod completion")
err = f.WaitForPodNoLongerRunning(pod.Name)
Expect(err).NotTo(HaveOccurred())
pod, err = podClient.Get(pod.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Checking that the sysctl is actually updated")
Expect(log).To(ContainSubstring("kernel.shm_rmid_forced = 1"))
})
It("should reject invalid sysctls", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "foo-",
Value: "bar",
},
{
Name: "kernel.shmmax",
Value: "100000000",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shmall",
Value: "100000000",
},
{
Name: "bar..",
Value: "42",
},
{
Name: "safe-and-unsafe",
Value: "100000000",
},
})
By("Creating a pod with one valid and two invalid sysctls")
client := f.ClientSet.Core().Pods(f.Namespace.Name)
_, err := client.Create(pod)
Expect(err).NotTo(BeNil())
Expect(err.Error()).To(ContainSubstring(`Invalid value: "foo-"`))
Expect(err.Error()).To(ContainSubstring(`Invalid value: "bar.."`))
Expect(err.Error()).To(ContainSubstring(`safe-and-unsafe`))
Expect(err.Error()).NotTo(ContainSubstring("kernel.shmmax"))
})
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.msgmax",
Value: "10000000000",
},
})
By("Creating a pod with a greylisted, but not whitelisted sysctl on the node")
pod = podClient.Create(pod)
By("Watching for error events or started pod")
// watch for events instead of termination of pod because the kubelet deletes
// failed pods without running containers. This would create a race as the pod
// might have already been deleted here.
ev, err := waitForPodErrorEventOrStarted(pod)
Expect(err).NotTo(HaveOccurred())
if ev != nil && ev.Reason == sysctl.UnsupportedReason {
framework.Skipf("No sysctl support in Docker <1.12")
}
By("Checking that the pod was rejected")
Expect(ev).ToNot(BeNil())
Expect(ev.Reason).To(Equal("SysctlForbidden"))
})
})

50
vendor/k8s.io/kubernetes/test/e2e/common/util.go generated vendored Normal file
View file

@ -0,0 +1,50 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package common
import (
"k8s.io/apimachinery/pkg/util/sets"
)
type Suite string
const (
E2E Suite = "e2e"
NodeE2E Suite = "node e2e"
)
var CurrentSuite Suite
// CommonImageWhiteList is the list of images used in the common tests. These images should be
// prepulled before a test starts, so that the tests won't fail due to image pulling flakes.
// Currently, this is only used by the node e2e test.
// TODO(random-liu): Change the image puller pod to use similar mechanism.
var CommonImageWhiteList = sets.NewString(
"gcr.io/google_containers/busybox:1.24",
"gcr.io/google_containers/eptest:0.1",
"gcr.io/google_containers/liveness:e2e",
"gcr.io/google_containers/mounttest:0.7",
"gcr.io/google_containers/mounttest-user:0.3",
"gcr.io/google_containers/netexec:1.4",
"gcr.io/google_containers/netexec:1.5",
"gcr.io/google_containers/nginx-slim:0.7",
"gcr.io/google_containers/serve_hostname:v1.4",
"gcr.io/google_containers/test-webserver:e2e",
"gcr.io/google_containers/hostexec:1.2",
"gcr.io/google_containers/volume-nfs:0.8",
"gcr.io/google_containers/volume-gluster:0.2",
)

499
vendor/k8s.io/kubernetes/test/e2e/common/volumes.go generated vendored Normal file
View file

@ -0,0 +1,499 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
/*
* This test checks that various VolumeSources are working.
*
* There are two ways to test the volumes:
* 1) With a containerized server (NFS, Ceph, Gluster, iSCSI, ...)
* The test creates a server pod, exporting a simple 'index.html' file.
* Then it uses the appropriate VolumeSource to import this file into a client pod
* and checks that the pod can see the file. It does so by importing the file
* into the web server root and loading the index.html from it.
*
* These tests work only when privileged containers are allowed; exporting
* various filesystems (NFS, GlusterFS, ...) usually needs some mounting or
* other privileged magic in the server pod.
*
* Note that the server containers are for testing purposes only and should not
* be used in production.
*
* 2) With a server outside of Kubernetes (Cinder, ...)
* An appropriate server (e.g. OpenStack Cinder) must exist somewhere outside
* the tested Kubernetes cluster. The test itself creates a new volume
* and checks that Kubernetes can use it as a volume.
*/
package common
import (
"fmt"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// Configuration of one test. The test consists of:
// - server pod - runs serverImage, exports ports[]
// - client pod - does not need any special configuration
type VolumeTestConfig struct {
namespace string
// Prefix of all pods. Typically the test name.
prefix string
// Name of container image for the server pod.
serverImage string
// Ports to export from the server pod. TCP only.
serverPorts []int
// Arguments to pass to the container image.
serverArgs []string
// Volumes needed to be mounted to the server container from the host
// map <host (source) path> -> <container (dst.) path>
volumes map[string]string
}
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
podClient := f.PodClient()
portCount := len(config.serverPorts)
serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.prefix, i)
serverPodPorts[i] = v1.ContainerPort{
Name: portName,
ContainerPort: int32(config.serverPorts[i]),
Protocol: v1.ProtocolTCP,
}
}
volumeCount := len(config.volumes)
volumes := make([]v1.Volume, volumeCount)
mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.volumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
mounts[i].Name = mountName
mounts[i].ReadOnly = false
mounts[i].MountPath = dst
i++
}
By(fmt.Sprint("creating ", config.prefix, " server pod"))
privileged := new(bool)
*privileged = true
serverPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
Labels: map[string]string{
"role": config.prefix + "-server",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.prefix + "-server",
Image: config.serverImage,
SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Args: config.serverArgs,
Ports: serverPodPorts,
VolumeMounts: mounts,
},
},
Volumes: volumes,
},
}
serverPod = podClient.CreateSync(serverPod)
By("locating the server pod")
pod, err := podClient.Get(serverPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the server pod %v: %v", serverPod.Name, err)
By("sleeping a bit to give the server time to start")
time.Sleep(20 * time.Second)
return pod
}
// Clean both server and client pods.
func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
By(fmt.Sprint("cleaning the environment after ", config.prefix))
defer GinkgoRecover()
podClient := f.PodClient()
err := podClient.Delete(config.prefix+"-client", nil)
if err != nil {
// Log the error before failing test: if the test has already failed,
// framework.ExpectNoError() won't print anything to logs!
glog.Warningf("Failed to delete client pod: %v", err)
framework.ExpectNoError(err, "Failed to delete client pod: %v", err)
}
if config.serverImage != "" {
if err := f.WaitForPodTerminated(config.prefix+"-client", ""); !apierrs.IsNotFound(err) {
framework.ExpectNoError(err, "Failed to wait client pod terminated: %v", err)
}
// See issue #24100.
// Prevent umount errors by making sure the client pod exits cleanly *before* the volume server pod exits.
By("sleeping a bit so client can stop and unmount")
time.Sleep(20 * time.Second)
err = podClient.Delete(config.prefix+"-server", nil)
if err != nil {
glog.Warningf("Failed to delete server pod: %v", err)
framework.ExpectNoError(err, "Failed to delete server pod: %v", err)
}
}
}
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
By(fmt.Sprint("starting ", config.prefix, " client"))
clientPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-client",
Labels: map[string]string{
"role": config.prefix + "-client",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.prefix + "-client",
Image: "gcr.io/google_containers/busybox:1.24",
WorkingDir: "/opt",
// An imperative and easily debuggable container which reads vol contents for
// us to scan in the tests or by eye.
// We expect that /opt is empty in the minimal containers which we use in this test.
Command: []string{
"/bin/sh",
"-c",
"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/opt/",
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
},
},
},
}
podClient := f.PodClient()
if fsGroup != nil {
clientPod.Spec.SecurityContext.FSGroup = fsGroup
}
clientPod = podClient.CreateSync(clientPod)
By("Checking that text file contents are perfect.")
result := f.ExecCommandInPod(clientPod.Name, "cat", "/opt/index.html")
var err error
if !strings.Contains(result, expectedContent) {
err = fmt.Errorf("Failed to find \"%s\", last result: \"%s\"", expectedContent, result)
}
Expect(err).NotTo(HaveOccurred(), "failed: finding the contents of the mounted file.")
if fsGroup != nil {
By("Checking fsGroup is correct.")
_, err := framework.LookForStringInPodExec(config.namespace, clientPod.Name, []string{"ls", "-ld", "/opt"}, strconv.Itoa(int(*fsGroup)), time.Minute)
Expect(err).NotTo(HaveOccurred(), "failed: getting the right priviliges in the file %v", int(*fsGroup))
}
}
// Insert index.html with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.prefix, " injector"))
podClient := client.Core().Pods(config.namespace)
injectPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-injector",
Labels: map[string]string{
"role": config.prefix + "-injector",
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: config.prefix + "-injector",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/mnt",
},
},
},
},
SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
},
},
},
}
defer func() {
podClient.Delete(config.prefix+"-injector", nil)
}()
injectPod, err := podClient.Create(injectPod)
framework.ExpectNoError(err, "Failed to create injector pod: %v", err)
err = framework.WaitForPodSuccessInNamespace(client, injectPod.Name, injectPod.Namespace)
Expect(err).NotTo(HaveOccurred())
}
func deleteCinderVolume(name string) error {
// Try to delete the volume for several seconds - it takes
// a while for the plugin to detach it.
var output []byte
var err error
timeout := time.Second * 120
framework.Logf("Waiting up to %v for removal of cinder volume %s", timeout, name)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
output, err = exec.Command("cinder", "delete", name).CombinedOutput()
if err == nil {
framework.Logf("Cinder volume %s deleted", name)
return nil
}
framework.Logf("Failed to delete volume %s: %v", name, err)
}
framework.Logf("Giving up deleting volume %s: %v\n%s", name, err, string(output[:]))
return err
}
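// Editorial sketch, not part of the upstream file: the retry loop in
// deleteCinderVolume can also be phrased with the apimachinery wait helpers,
// assuming "k8s.io/apimachinery/pkg/util/wait" is imported:
func deleteCinderVolumeWithPoll(name string) error {
    var lastErr error
    var lastOutput []byte
    err := wait.PollImmediate(5*time.Second, 120*time.Second, func() (bool, error) {
        lastOutput, lastErr = exec.Command("cinder", "delete", name).CombinedOutput()
        if lastErr == nil {
            framework.Logf("Cinder volume %s deleted", name)
            return true, nil
        }
        framework.Logf("Failed to delete volume %s: %v", name, lastErr)
        return false, nil // keep retrying until the timeout
    })
    if err != nil {
        framework.Logf("Giving up deleting volume %s: %v\n%s", name, lastErr, string(lastOutput))
        return lastErr
    }
    return nil
}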
// These tests need privileged containers, which are disabled by default. Run
// the test with "go run hack/e2e.go ... --ginkgo.focus=[Feature:Volumes]"
var _ = framework.KubeDescribe("GCP Volumes", func() {
f := framework.NewDefaultFramework("gcp-volume")
// If 'false', the test won't clear its volumes upon completion. Useful for debugging;
// note that namespace deletion is handled by the delete-namespace flag.
clean := true
// filled in BeforeEach
var namespace *v1.Namespace
BeforeEach(func() {
if !isTestEnabled(f.ClientSet) {
framework.Skipf("NFS tests are not supported for this distro")
}
namespace = f.Namespace
})
////////////////////////////////////////////////////////////////////////
// NFS
////////////////////////////////////////////////////////////////////////
framework.KubeDescribe("NFSv4", func() {
It("should be mountable for NFSv4 [Volume]", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.8",
serverPorts: []int{2049},
}
defer func() {
if clean {
volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(f, config)
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
volume := v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
},
}
// Must match content of test/images/volumes-tester/nfs/index.html
testVolumeClient(f, config, volume, nil, "Hello from NFS!")
})
})
////////////////////////////////////////////////////////////////////////
// Gluster
////////////////////////////////////////////////////////////////////////
framework.KubeDescribe("GlusterFS", func() {
It("should be mountable [Volume]", func() {
config := VolumeTestConfig{
namespace: namespace.Name,
prefix: "gluster",
serverImage: "gcr.io/google_containers/volume-gluster:0.2",
serverPorts: []int{24007, 24008, 49152},
}
defer func() {
if clean {
volumeTestCleanup(f, config)
}
}()
pod := startVolumeServer(f, config)
serverIP := pod.Status.PodIP
framework.Logf("Gluster server IP address: %v", serverIP)
// create Endpoints for the server
endpoints := v1.Endpoints{
TypeMeta: metav1.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
},
Subsets: []v1.EndpointSubset{
{
Addresses: []v1.EndpointAddress{
{
IP: serverIP,
},
},
Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
Protocol: v1.ProtocolTCP,
},
},
},
},
}
endClient := f.ClientSet.Core().Endpoints(config.namespace)
defer func() {
if clean {
endClient.Delete(config.prefix+"-server", nil)
}
}()
if _, err := endClient.Create(&endpoints); err != nil {
framework.Failf("Failed to create endpoints for Gluster server: %v", err)
}
volume := v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
ReadOnly: true,
},
}
// Must match content of test/images/volumes-tester/gluster/index.html
testVolumeClient(f, config, volume, nil, "Hello from GlusterFS!")
})
})
})
func isTestEnabled(c clientset.Interface) bool {
// Enable the test on node e2e if the node image is GCI.
nodeName := framework.TestContext.NodeName
if nodeName != "" {
if strings.Contains(nodeName, "-gci-dev-") {
gciVersionRe := regexp.MustCompile("-gci-dev-([0-9]+)-")
matches := gciVersionRe.FindStringSubmatch(framework.TestContext.NodeName)
if len(matches) == 2 {
version, err := strconv.Atoi(matches[1])
if err != nil {
glog.Errorf("Error parsing GCI version from NodeName %q: %v", nodeName, err)
return false
}
return version >= 54
}
}
return false
}
// For cluster e2e tests, nodeName is empty, so retrieve the node objects from the API server
// and check their images. For now, only run NFSv4 and GlusterFS if the nodes are using a GCI image.
nodes := framework.GetReadySchedulableNodesOrDie(c)
for _, node := range nodes.Items {
if !strings.Contains(node.Status.NodeInfo.OSImage, "Google Container-VM") {
return false
}
}
return true
}
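// Editorial sketch, not part of the upstream file: how the GCI version check
// in isTestEnabled behaves on a few made-up node names:
func exampleGCIVersionParsing() {
    re := regexp.MustCompile("-gci-dev-([0-9]+)-")
    for _, name := range []string{
        "e2e-node-gci-dev-54-abc",  // matches, milestone 54 -> enabled
        "e2e-node-gci-dev-53-abc",  // matches, milestone 53 -> too old
        "e2e-node-containervm-abc", // no match -> disabled
    } {
        if m := re.FindStringSubmatch(name); len(m) == 2 {
            v, _ := strconv.Atoi(m[1])
            fmt.Printf("%s -> GCI milestone %d, enabled: %v\n", name, v, v >= 54)
        } else {
            fmt.Printf("%s -> not a GCI dev image\n", name)
        }
    }
}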

394
vendor/k8s.io/kubernetes/test/e2e/cronjob.go generated vendored Normal file
View file

@ -0,0 +1,394 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
// How long to wait for a cronjob
cronJobTimeout = 5 * time.Minute
)
var (
CronJobGroupVersionResource = schema.GroupVersionResource{Group: batch.GroupName, Version: "v2alpha1", Resource: "cronjobs"}
ScheduledJobGroupVersionResource = schema.GroupVersionResource{Group: batch.GroupName, Version: "v2alpha1", Resource: "scheduledjobs"}
BatchV2Alpha1GroupVersion = schema.GroupVersion{Group: batch.GroupName, Version: "v2alpha1"}
)
var _ = framework.KubeDescribe("CronJob", func() {
f := framework.NewDefaultGroupVersionFramework("cronjob", BatchV2Alpha1GroupVersion)
BeforeEach(func() {
framework.SkipIfMissingResource(f.ClientPool, CronJobGroupVersionResource, f.Namespace.Name)
})
// multiple jobs running at once
It("should schedule multiple jobs concurrently", func() {
By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, true)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring more than one job is running at a time")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
Expect(err).NotTo(HaveOccurred())
By("Ensuring at least two running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(len(activeJobs) >= 2).To(BeTrue())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
// suspended should not schedule jobs
It("should not schedule jobs when suspended [Slow]", func() {
By("Creating a suspended cronjob")
cronJob := newTestCronJob("suspended", "*/1 * * * ?", batch.AllowConcurrent, true)
cronJob.Spec.Suspend = newBool(true)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring no jobs are scheduled")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, false)
Expect(err).To(HaveOccurred())
By("Ensuring no job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(jobs.Items).To(HaveLen(0))
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
// only a single active job is allowed for ForbidConcurrent
It("should not schedule new jobs when ForbidConcurrent [Slow]", func() {
By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, true)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
By("Ensuring no more jobs are scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 2)
Expect(err).To(HaveOccurred())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
// only a single active job is allowed for ReplaceConcurrent
It("should replace jobs when ReplaceConcurrent", func() {
By("Creating a ReplaceConcurrent cronjob")
cronJob := newTestCronJob("replace", "*/1 * * * ?", batch.ReplaceConcurrent, true)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
By("Ensuring the job is replaced with a new one")
err = waitForJobReplaced(f.ClientSet, f.Namespace.Name, jobs.Items[0].Name)
Expect(err).NotTo(HaveOccurred())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
// shouldn't give us unexpected warnings
It("should not emit unexpected warnings", func() {
By("Creating a cronjob")
cronJob := newTestCronJob("concurrent", "*/1 * * * ?", batch.AllowConcurrent, false)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring at least two jobs and at least one finished job exists by listing jobs explicitly")
err = waitForJobsAtLeast(f.ClientSet, f.Namespace.Name, 2)
Expect(err).NotTo(HaveOccurred())
err = waitForAnyFinishedJob(f.ClientSet, f.Namespace.Name)
Expect(err).NotTo(HaveOccurred())
By("Ensuring no unexpected event has happened")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob", "UnexpectedJob"})
Expect(err).NotTo(HaveOccurred())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
// deleted jobs should be removed from the active list
It("should remove from active list jobs that have been deleted", func() {
By("Creating a ForbidConcurrent cronjob")
cronJob := newTestCronJob("forbid", "*/1 * * * ?", batch.ForbidConcurrent, true)
cronJob, err := createCronJob(f.ClientSet, f.Namespace.Name, cronJob)
Expect(err).NotTo(HaveOccurred())
By("Ensuring a job is scheduled")
err = waitForActiveJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, 1)
Expect(err).NotTo(HaveOccurred())
By("Ensuring exactly one is scheduled")
cronJob, err = getCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Deleting the job")
job := cronJob.Status.Active[0]
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring job was deleted")
_, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
By("Ensuring there are no active jobs in the cronjob")
err = waitForNoJobs(f.ClientSet, f.Namespace.Name, cronJob.Name, true)
Expect(err).NotTo(HaveOccurred())
By("Ensuring MissingJob event has occured")
err = checkNoEventWithReason(f.ClientSet, f.Namespace.Name, cronJob.Name, []string{"MissingJob"})
Expect(err).To(HaveOccurred())
By("Removing cronjob")
err = deleteCronJob(f.ClientSet, f.Namespace.Name, cronJob.Name)
Expect(err).NotTo(HaveOccurred())
})
})
// newTestCronJob returns a cronjob which exercises one of several testing behaviors.
func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPolicy, sleep bool) *batch.CronJob {
parallelism := int32(1)
completions := int32(1)
sj := &batch.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: batch.CronJobSpec{
Schedule: schedule,
ConcurrencyPolicy: concurrencyPolicy,
JobTemplate: batch.JobTemplateSpec{
Spec: batch.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
},
},
},
},
},
},
},
},
},
}
if sleep {
sj.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "300"}
}
return sj
}
func createCronJob(c clientset.Interface, ns string, cronJob *batch.CronJob) (*batch.CronJob, error) {
return c.BatchV2alpha1().CronJobs(ns).Create(cronJob)
}
func getCronJob(c clientset.Interface, ns, name string) (*batch.CronJob, error) {
return c.BatchV2alpha1().CronJobs(ns).Get(name, metav1.GetOptions{})
}
func deleteCronJob(c clientset.Interface, ns, name string) error {
return c.BatchV2alpha1().CronJobs(ns).Delete(name, nil)
}
// Wait for at least the given number of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return len(curr.Status.Active) >= active, nil
})
}
// Wait for jobs to appear in the active list of a cronjob or not.
// When failIfNonEmpty is set, this fails if the active set of jobs is still non-empty after
// the timeout. When failIfNonEmpty is not set, this fails if the active set of jobs is still
// empty after the timeout.
func waitForNoJobs(c clientset.Interface, ns, jobName string, failIfNonEmpty bool) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.BatchV2alpha1().CronJobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
if failIfNonEmpty {
return len(curr.Status.Active) == 0, nil
} else {
return len(curr.Status.Active) != 0, nil
}
})
}
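// Editorial sketch, not part of the upstream file: the two modes of
// waitForNoJobs side by side (c and ns are placeholders):
func exampleWaitForNoJobsUsage(c clientset.Interface, ns string) {
    // failIfNonEmpty=true: poll until Status.Active drains, e.g. after the
    // only active job has been deleted; a timeout means it never drained.
    err := waitForNoJobs(c, ns, "my-cronjob", true)
    framework.Logf("active list drained: %v", err)
    // failIfNonEmpty=false: poll until Status.Active becomes non-empty; the
    // suspended-cronjob test above relies on this timing out (err != nil)
    // to prove that no job was ever scheduled.
    err = waitForNoJobs(c, ns, "my-cronjob", false)
    framework.Logf("job appeared (timeout expected when suspended): %v", err)
}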
// Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
if len(jobs.Items) > 1 {
return false, fmt.Errorf("More than one job is running %+v", jobs.Items)
} else if len(jobs.Items) == 0 {
framework.Logf("Warning: Found 0 jobs in namespace %v", ns)
return false, nil
}
return jobs.Items[0].Name != previousJobName, nil
})
}
// waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
return len(jobs.Items) >= atLeast, nil
})
}
// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
for i := range jobs.Items {
if job.IsJobFinished(&jobs.Items[i]) {
return true, nil
}
}
return false, nil
})
}
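// Editorial sketch, not part of the upstream file: job.IsJobFinished comes
// from the job controller; conventionally a Job counts as finished once it
// carries a Complete or Failed condition with status True. A hedged
// restatement in this file's types:
func isFinished(j *batchv1.Job) bool {
    for _, c := range j.Status.Conditions {
        if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) && c.Status == v1.ConditionTrue {
            return true
        }
    }
    return false
}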
// checkNoEventWithReason checks that no event with any of the given reasons has occurred
func checkNoEventWithReason(c clientset.Interface, ns, cronJobName string, reasons []string) error {
sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Error in getting cronjob %s/%s: %v", ns, cronJobName, err)
}
events, err := c.Core().Events(ns).Search(sj)
if err != nil {
return fmt.Errorf("Error in listing events: %s", err)
}
for _, e := range events.Items {
for _, reason := range reasons {
if e.Reason == reason {
return fmt.Errorf("Found event with reason %s: %#v", reason, e)
}
}
}
return nil
}
func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job) {
for i := range jobs.Items {
j := jobs.Items[i]
if !job.IsJobFinished(&j) {
active = append(active, &j)
}
}
return
}

322
vendor/k8s.io/kubernetes/test/e2e/daemon_restart.go generated vendored Normal file
View file

@ -0,0 +1,322 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"strconv"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// This test primarily checks 2 things:
// 1. Daemons restart automatically within some sane time (10m).
// 2. They don't take abnormal actions when restarted in the steady state.
// - Controller manager shouldn't overshoot replicas
// - Kubelet shouldn't restart containers
// - Scheduler should continue assigning hosts to new pods
const (
restartPollInterval = 5 * time.Second
restartTimeout = 10 * time.Minute
numPods = 10
sshPort = 22
ADD = "ADD"
DEL = "DEL"
UPDATE = "UPDATE"
)
// nodeExec execs the given cmd on a node via SSH. Note that nodeName must be an SSH-able name,
// e.g. the name returned by framework.GetMasterHost(). This is also not guaranteed to work across
// cloud providers, since it involves SSH.
func nodeExec(nodeName, cmd string) (framework.SSHResult, error) {
result, err := framework.SSH(cmd, fmt.Sprintf("%v:%v", nodeName, sshPort), framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
return result, err
}
// restartDaemonConfig is a config to restart a running daemon on a node, and wait till
// it comes back up. It uses ssh to send a SIGTERM to the daemon.
type restartDaemonConfig struct {
nodeName string
daemonName string
healthzPort int
pollInterval time.Duration
pollTimeout time.Duration
}
// NewRestartConfig creates a restartDaemonConfig for the given node and daemon.
func NewRestartConfig(nodeName, daemonName string, healthzPort int, pollInterval, pollTimeout time.Duration) *restartDaemonConfig {
if !framework.ProviderIs("gce") {
framework.Logf("WARNING: SSH through the restart config might not work on %s", framework.TestContext.Provider)
}
return &restartDaemonConfig{
nodeName: nodeName,
daemonName: daemonName,
healthzPort: healthzPort,
pollInterval: pollInterval,
pollTimeout: pollTimeout,
}
}
func (r *restartDaemonConfig) String() string {
return fmt.Sprintf("Daemon %v on node %v", r.daemonName, r.nodeName)
}
// waitUp polls the daemon's /healthz endpoint until it returns HTTP 200 or the polling hits pollTimeout
func (r *restartDaemonConfig) waitUp() {
framework.Logf("Checking if %v is up by polling for a 200 on its /healthz endpoint", r)
healthzCheck := fmt.Sprintf(
"curl -s -o /dev/null -I -w \"%%{http_code}\" http://localhost:%v/healthz", r.healthzPort)
err := wait.Poll(r.pollInterval, r.pollTimeout, func() (bool, error) {
result, err := nodeExec(r.nodeName, healthzCheck)
framework.ExpectNoError(err)
if result.Code == 0 {
httpCode, err := strconv.Atoi(result.Stdout)
if err != nil {
framework.Logf("Unable to parse healthz http return code: %v", err)
} else if httpCode == 200 {
return true, nil
}
}
framework.Logf("node %v exec command, '%v' failed with exitcode %v: \n\tstdout: %v\n\tstderr: %v",
r.nodeName, healthzCheck, result.Code, result.Stdout, result.Stderr)
return false, nil
})
framework.ExpectNoError(err, "%v did not respond with a 200 via %v within %v", r, healthzCheck, r.pollTimeout)
}
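// Editorial sketch, not part of the upstream file: when the healthz port is
// reachable directly from the test runner (often it is not, hence the
// SSH+curl above), the same probe reduces to a plain HTTP GET; assumes
// "net/http" is imported:
func healthzOK(host string, port int) bool {
    resp, err := http.Get(fmt.Sprintf("http://%s:%d/healthz", host, port))
    if err != nil {
        return false
    }
    defer resp.Body.Close()
    return resp.StatusCode == http.StatusOK
}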
// kill sends a SIGTERM to the daemon
func (r *restartDaemonConfig) kill() {
framework.Logf("Killing %v", r)
nodeExec(r.nodeName, fmt.Sprintf("pgrep %v | xargs -I {} sudo kill {}", r.daemonName))
}
// Restart checks if the daemon is up, kills it, and waits till it comes back up
func (r *restartDaemonConfig) restart() {
r.waitUp()
r.kill()
r.waitUp()
}
// podTracker records a serial history of events that might've affected pods.
type podTracker struct {
cache.ThreadSafeStore
}
func (p *podTracker) remember(pod *v1.Pod, eventType string) {
if eventType == UPDATE && pod.Status.Phase == v1.PodRunning {
return
}
p.Add(fmt.Sprintf("[%v] %v: %v", time.Now(), eventType, pod.Name), pod)
}
func (p *podTracker) String() (msg string) {
for _, k := range p.ListKeys() {
obj, exists := p.Get(k)
if !exists {
continue
}
pod := obj.(*v1.Pod)
msg += fmt.Sprintf("%v Phase %v Host %v\n", k, pod.Status.Phase, pod.Spec.NodeName)
}
return
}
func newPodTracker() *podTracker {
return &podTracker{cache.NewThreadSafeStore(
cache.Indexers{}, cache.Indices{})}
}
// replacePods replaces content of the store with the given pods.
func replacePods(pods []*v1.Pod, store cache.Store) {
found := make([]interface{}, 0, len(pods))
for i := range pods {
found = append(found, pods[i])
}
framework.ExpectNoError(store.Replace(found, "0"))
}
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of node names on which those containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
options := v1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := c.Core().Pods(ns).List(options)
framework.ExpectNoError(err)
failedContainers := 0
containerRestartNodes := sets.NewString()
for _, p := range pods.Items {
for _, v := range testutils.FailedContainers(&p) {
failedContainers = failedContainers + v.Restarts
containerRestartNodes.Insert(p.Spec.NodeName)
}
}
return failedContainers, containerRestartNodes.List()
}
var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
f := framework.NewDefaultFramework("daemonrestart")
rcName := "daemonrestart" + strconv.Itoa(numPods) + "-" + string(uuid.NewUUID())
labelSelector := labels.Set(map[string]string{"name": rcName}).AsSelector()
existingPods := cache.NewStore(cache.MetaNamespaceKeyFunc)
var ns string
var config testutils.RCConfig
var controller cache.Controller
var newPods cache.Store
var stopCh chan struct{}
var tracker *podTracker
BeforeEach(func() {
// These tests require SSH
framework.SkipUnlessProviderIs(framework.ProvidersWithSSH...)
ns = f.Namespace.Name
// All the restart tests need an rc and a watch on pods of the rc.
// Additionally some of them might scale the rc during the test.
config = testutils.RCConfig{
Client: f.ClientSet,
InternalClient: f.InternalClientset,
Name: rcName,
Namespace: ns,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: numPods,
CreatedPods: &[]*v1.Pod{},
}
Expect(framework.RunRC(config)).NotTo(HaveOccurred())
replacePods(*config.CreatedPods, existingPods)
stopCh = make(chan struct{})
tracker = newPodTracker()
newPods, controller = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector.String()
obj, err := f.ClientSet.Core().Pods(ns).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector.String()
return f.ClientSet.Core().Pods(ns).Watch(options)
},
},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
tracker.remember(obj.(*v1.Pod), ADD)
},
UpdateFunc: func(oldObj, newObj interface{}) {
tracker.remember(newObj.(*v1.Pod), UPDATE)
},
DeleteFunc: func(obj interface{}) {
tracker.remember(obj.(*v1.Pod), DEL)
},
},
)
go controller.Run(stopCh)
})
AfterEach(func() {
close(stopCh)
})
It("Controller Manager should not create/delete replicas across restart", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-controller", ports.ControllerManagerPort, restartPollInterval, restartTimeout)
restarter.restart()
// The intent is to ensure the replication controller manager has observed and reported status of
// the replication controller at least once since the manager restarted, so that we can determine
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number
// and waits for it to be observed and reported back in the RC's status.
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)
// Only check the keys; the pods can differ if the kubelet updated them.
// TODO: Can it really?
existingKeys := sets.NewString()
newKeys := sets.NewString()
for _, k := range existingPods.ListKeys() {
existingKeys.Insert(k)
}
for _, k := range newPods.ListKeys() {
newKeys.Insert(k)
}
if len(newKeys.List()) != len(existingKeys.List()) ||
!newKeys.IsSuperset(existingKeys) {
framework.Failf("RcManager created/deleted pods after restart \n\n %+v", tracker)
}
})
It("Scheduler should continue assigning pods to nodes across restart", func() {
// Requires master ssh access.
framework.SkipUnlessProviderIs("gce", "aws")
restarter := NewRestartConfig(
framework.GetMasterHost(), "kube-scheduler", ports.SchedulerPort, restartPollInterval, restartTimeout)
// Create pods while the scheduler is down and make sure the scheduler picks them up by
// scaling the rc to the same size.
restarter.waitUp()
restarter.kill()
// This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal.
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
restarter.waitUp()
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
})
It("Kubelet should not restart containers across restart", func() {
nodeIPs, err := framework.GetNodePublicIps(f.ClientSet)
framework.ExpectNoError(err)
preRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if preRestarts != 0 {
framework.Logf("WARNING: Non-zero container restart count: %d across nodes %v", preRestarts, badNodes)
}
for _, ip := range nodeIPs {
restarter := NewRestartConfig(
ip, "kubelet", ports.KubeletReadOnlyPort, restartPollInterval, restartTimeout)
restarter.restart()
}
postRestarts, badNodes := getContainerRestarts(f.ClientSet, ns, labelSelector)
if postRestarts != preRestarts {
framework.DumpNodeDebugInfo(f.ClientSet, badNodes, framework.Logf)
framework.Failf("Net container restart count went from %v -> %v after kubelet restart on nodes %v \n\n %+v", preRestarts, postRestarts, badNodes, tracker)
}
})
})

401
vendor/k8s.io/kubernetes/test/e2e/daemon_set.go generated vendored Normal file
View file

@ -0,0 +1,401 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"reflect"
"strings"
"time"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// this should not be a multiple of 5, because node status updates
// every 5 seconds. See https://github.com/kubernetes/kubernetes/pull/14915.
dsRetryPeriod = 2 * time.Second
dsRetryTimeout = 5 * time.Minute
daemonsetLabelPrefix = "daemonset-"
daemonsetNameLabel = daemonsetLabelPrefix + "name"
daemonsetColorLabel = daemonsetLabelPrefix + "color"
)
// This test must be run in serial because it assumes the Daemon Set pods will
// always get scheduled. If we run other tests in parallel, this may not
// happen. In the future, running in parallel may work if we have an eviction
// model which lets the DS controller kick out other pods to make room.
// See http://issues.k8s.io/21767 for more details
var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
var f *framework.Framework
AfterEach(func() {
if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), daemonsets))
} else {
framework.Logf("unable to dump daemonsets: %v", err)
}
if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(api.Registry.EnabledVersions()...), pods))
} else {
framework.Logf("unable to dump pods: %v", err)
}
err := clearDaemonSetNodeLabels(f.ClientSet)
Expect(err).NotTo(HaveOccurred())
})
f = framework.NewDefaultFramework("daemonsets")
image := "gcr.io/google_containers/serve_hostname:v1.4"
dsName := "daemon-set"
var ns string
var c clientset.Interface
BeforeEach(func() {
ns = f.Namespace.Name
c = f.ClientSet
err := clearDaemonSetNodeLabels(c)
Expect(err).NotTo(HaveOccurred())
})
It("should run and stop simple daemon", func() {
label := map[string]string{daemonsetNameLabel: dsName}
framework.Logf("Creating simple daemon set %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: v1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: label,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: dsName,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
err = dsReaper.Stop(ns, dsName, 0, nil)
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to be reaped")
}()
By("Check that daemon pods launch on every node of the cluster.")
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to start")
err = checkDaemonStatus(f, dsName)
Expect(err).NotTo(HaveOccurred())
By("Stop a daemon pod, check that the daemon pod is revived.")
podClient := c.Core().Pods(ns)
selector := labels.Set(label).AsSelector()
options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items)).To(BeNumerically(">", 0))
pod := podList.Items[0]
err = podClient.Delete(pod.Name, nil)
Expect(err).NotTo(HaveOccurred())
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnAllNodes(f, label))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pod to revive")
})
It("should run and stop complex daemon", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node selector %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: v1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: complexLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: complexLabel,
},
Spec: v1.PodSpec{
NodeSelector: nodeSelector,
Containers: []v1.Container{
{
Name: dsName,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
By("Initially, daemon pods should not be running on any nodes.")
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1))
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
Expect(err).NotTo(HaveOccurred())
By("remove the node selector and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
By("We should now be able to delete the daemon set.")
Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred())
})
It("should run and stop complex daemon with node affinity", func() {
complexLabel := map[string]string{daemonsetNameLabel: dsName}
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node affinity %s", dsName)
affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: daemonsetColorLabel,
Operator: v1.NodeSelectorOpIn,
Values: []string{nodeSelector[daemonsetColorLabel]},
},
},
},
},
},
},
}
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: v1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: complexLabel},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: complexLabel,
},
Spec: v1.PodSpec{
Affinity: affinity,
Containers: []v1.Container{
{
Name: dsName,
Image: image,
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())
By("Initially, daemon pods should not be running on any nodes.")
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on no nodes")
By("Change label of node, check that daemon pod is launched.")
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
newNode, err := setDaemonSetNodeLabels(c, nodeList.Items[0].Name, nodeSelector)
Expect(err).NotTo(HaveOccurred(), "error setting labels on node")
daemonSetLabels, _ := separateDaemonSetNodeLabels(newNode.Labels)
Expect(len(daemonSetLabels)).To(Equal(1))
err = wait.Poll(dsRetryPeriod, dsRetryTimeout, checkDaemonPodOnNodes(f, complexLabel, []string{newNode.Name}))
Expect(err).NotTo(HaveOccurred(), "error waiting for daemon pods to be running on new nodes")
err = checkDaemonStatus(f, dsName)
Expect(err).NotTo(HaveOccurred())
By("remove the node selector and wait for daemons to be unscheduled")
_, err = setDaemonSetNodeLabels(c, nodeList.Items[0].Name, map[string]string{})
Expect(err).NotTo(HaveOccurred(), "error removing labels on node")
Expect(wait.Poll(dsRetryPeriod, dsRetryTimeout, checkRunningOnNoNodes(f, complexLabel))).
NotTo(HaveOccurred(), "error waiting for daemon pod to not be running on nodes")
By("We should now be able to delete the daemon set.")
Expect(c.Extensions().DaemonSets(ns).Delete(dsName, nil)).NotTo(HaveOccurred())
})
})
func separateDaemonSetNodeLabels(labels map[string]string) (map[string]string, map[string]string) {
daemonSetLabels := map[string]string{}
otherLabels := map[string]string{}
for k, v := range labels {
if strings.HasPrefix(k, daemonsetLabelPrefix) {
daemonSetLabels[k] = v
} else {
otherLabels[k] = v
}
}
return daemonSetLabels, otherLabels
}
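// Editorial sketch, not part of the upstream file: separateDaemonSetNodeLabels
// applied to a mixed label map (values are made up):
func exampleSeparateDaemonSetNodeLabels() {
    ds, other := separateDaemonSetNodeLabels(map[string]string{
        "daemonset-color":        "blue",   // has daemonsetLabelPrefix
        "kubernetes.io/hostname": "node-1", // untouched
    })
    // ds == {"daemonset-color": "blue"}, other == {"kubernetes.io/hostname": "node-1"}
    fmt.Printf("ds=%v other=%v\n", ds, other)
}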
func clearDaemonSetNodeLabels(c clientset.Interface) error {
nodeList := framework.GetReadySchedulableNodesOrDie(c)
for _, node := range nodeList.Items {
_, err := setDaemonSetNodeLabels(c, node.Name, map[string]string{})
if err != nil {
return err
}
}
return nil
}
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.Core().Nodes()
var newNode *v1.Node
var newLabels map[string]string
err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
node, err := nodeClient.Get(nodeName, metav1.GetOptions{})
if err != nil {
return false, err
}
// remove all labels this test is creating
daemonSetLabels, otherLabels := separateDaemonSetNodeLabels(node.Labels)
if reflect.DeepEqual(daemonSetLabels, labels) {
newNode = node
return true, nil
}
node.Labels = otherLabels
for k, v := range labels {
node.Labels[k] = v
}
newNode, err = nodeClient.Update(node)
if err == nil {
newLabels, _ = separateDaemonSetNodeLabels(newNode.Labels)
return true, err
}
if se, ok := err.(*apierrs.StatusError); ok && se.ErrStatus.Reason == metav1.StatusReasonConflict {
framework.Logf("failed to update node due to resource version conflict")
return false, nil
}
return false, err
})
if err != nil {
return nil, err
} else if len(newLabels) != len(labels) {
return nil, fmt.Errorf("Could not set daemon set test labels as expected.")
}
return newNode, nil
}
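// Editorial sketch, not part of the upstream file: the conflict handling in
// setDaemonSetNodeLabels is the standard optimistic-concurrency retry:
// re-read the object and try again whenever the apiserver reports a
// ResourceVersion conflict. A generic hedged form using this file's imports:
func updateNodeWithRetry(c clientset.Interface, nodeName string, mutate func(*v1.Node)) (*v1.Node, error) {
    var updated *v1.Node
    err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
        node, err := c.Core().Nodes().Get(nodeName, metav1.GetOptions{})
        if err != nil {
            return false, err
        }
        mutate(node)
        updated, err = c.Core().Nodes().Update(node)
        if err == nil {
            return true, nil
        }
        if apierrs.IsConflict(err) {
            return false, nil // stale read; poll and retry
        }
        return false, err
    })
    return updated, err
}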
func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
selector := labels.Set(selector).AsSelector()
options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options)
if err != nil {
return false, nil
}
pods := podList.Items
nodesToPodCount := make(map[string]int)
for _, pod := range pods {
nodesToPodCount[pod.Spec.NodeName]++
}
framework.Logf("nodesToPodCount: %#v", nodesToPodCount)
// Ensure that exactly 1 pod is running on all nodes in nodeNames.
for _, nodeName := range nodeNames {
if nodesToPodCount[nodeName] != 1 {
return false, nil
}
}
// Ensure that sizes of the lists are the same. We've verified that every element of nodeNames is in
// nodesToPodCount, so verifying the lengths are equal ensures that there aren't pods running on any
// other nodes.
return len(nodesToPodCount) == len(nodeNames), nil
}
}
func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
return func() (bool, error) {
nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
nodeNames = append(nodeNames, node.Name)
}
return checkDaemonPodOnNodes(f, selector, nodeNames)()
}
}
func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
return checkDaemonPodOnNodes(f, selector, make([]string, 0))
}
func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName, metav1.GetOptions{})
if err != nil {
return fmt.Errorf("Could not get daemon set from v1.")
}
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
return fmt.Errorf("Error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
}
return nil
}

99
vendor/k8s.io/kubernetes/test/e2e/dashboard.go generated vendored Normal file
View file

@ -0,0 +1,99 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"net/http"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Kubernetes Dashboard", func() {
const (
uiServiceName = "kubernetes-dashboard"
uiAppName = uiServiceName
uiNamespace = api.NamespaceSystem
serverStartTimeout = 1 * time.Minute
)
f := framework.NewDefaultFramework(uiServiceName)
It("should check that the kubernetes-dashboard instance is alive", func() {
By("Checking whether the kubernetes-dashboard service exists.")
err := framework.WaitForService(f.ClientSet, uiNamespace, uiServiceName, true, framework.Poll, framework.ServiceStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("Checking to make sure the kubernetes-dashboard pods are running")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": uiAppName}))
err = testutils.WaitForPodsWithLabelRunning(f.ClientSet, uiNamespace, selector)
Expect(err).NotTo(HaveOccurred())
By("Checking to make sure we get a response from the kubernetes-dashboard.")
err = wait.Poll(framework.Poll, serverStartTimeout, func() (bool, error) {
var status int
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
if errProxy != nil {
framework.Logf("Get services proxy request failed: %v", errProxy)
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
// Query against the proxy URL for the kubernetes-dashboard service.
err := proxyRequest.Namespace(uiNamespace).
Context(ctx).
Name(uiServiceName).
Timeout(framework.SingleCallTimeout).
Do().
StatusCode(&status).
Error()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Request to kube-ui failed: %v", err)
return true, err
}
framework.Logf("Request to kube-ui failed: %v", err)
} else if status != http.StatusOK {
framework.Logf("Unexpected status from kubernetes-dashboard: %v", status)
}
// Don't return err here as it aborts polling.
return status == http.StatusOK, nil
})
Expect(err).NotTo(HaveOccurred())
By("Checking that the ApiServer /ui endpoint redirects to a valid server.")
var status int
err = f.ClientSet.Core().RESTClient().Get().
AbsPath("/ui").
Timeout(framework.SingleCallTimeout).
Do().
StatusCode(&status).
Error()
Expect(err).NotTo(HaveOccurred())
Expect(status).To(Equal(http.StatusOK), "Unexpected status from /ui")
})
})
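// Editorial sketch, not part of the upstream file: outside the framework
// helpers, the dashboard probe above boils down to a GET on the apiserver's
// service proxy path. One commonly used form of that path, assuming
// apiserverURL already carries authentication (e.g. a local "kubectl proxy"
// endpoint):
func probeDashboard(apiserverURL string) (int, error) {
    url := apiserverURL + "/api/v1/namespaces/kube-system/services/kubernetes-dashboard/proxy/"
    resp, err := http.Get(url)
    if err != nil {
        return 0, err
    }
    defer resp.Body.Close()
    return resp.StatusCode, nil
}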

825
vendor/k8s.io/kubernetes/test/e2e/density.go generated vendored Normal file
View file

@ -0,0 +1,825 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"math"
"os"
"sort"
"strconv"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/fields"
utiluuid "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/workqueue"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MinSaturationThreshold = 2 * time.Minute
MinPodsPerSecondThroughput = 8
DensityPollInterval = 10 * time.Second
)
// Maximum container failures this test tolerates before failing.
var MaxContainerFailures = 0
type DensityTestConfig struct {
Configs []testutils.RunObjectConfig
ClientSet clientset.Interface
InternalClientset internalclientset.Interface
PollInterval time.Duration
PodCount int
// What kind of resource we want to create
kind schema.GroupKind
SecretConfigs []*testutils.SecretConfig
DaemonConfigs []*testutils.DaemonConfig
}
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
var apiserverMem uint64
var controllerMem uint64
var schedulerMem uint64
apiserverCPU := math.MaxFloat32
apiserverMem = math.MaxUint64
controllerCPU := math.MaxFloat32
controllerMem = math.MaxUint64
schedulerCPU := math.MaxFloat32
schedulerMem = math.MaxUint64
framework.Logf("Setting resource constraings for provider: %s", framework.TestContext.Provider)
if framework.ProviderIs("kubemark") {
if numNodes <= 5 {
apiserverCPU = 0.35
apiserverMem = 150 * (1024 * 1024)
controllerCPU = 0.15
controllerMem = 100 * (1024 * 1024)
schedulerCPU = 0.05
schedulerMem = 50 * (1024 * 1024)
} else if numNodes <= 100 {
apiserverCPU = 1.5
apiserverMem = 1500 * (1024 * 1024)
controllerCPU = 0.75
controllerMem = 750 * (1024 * 1024)
schedulerCPU = 0.75
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 500 {
apiserverCPU = 3.5
apiserverMem = 3400 * (1024 * 1024)
controllerCPU = 1.3
controllerMem = 1100 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 500 * (1024 * 1024)
} else if numNodes <= 1000 {
apiserverCPU = 5.5
apiserverMem = 4000 * (1024 * 1024)
controllerCPU = 3
controllerMem = 2000 * (1024 * 1024)
schedulerCPU = 1.5
schedulerMem = 750 * (1024 * 1024)
}
} else {
if numNodes <= 100 {
// TODO: Investigate higher apiserver consumption and
// potentially revert to 1.5cpu and 1.3GB - see #30871
apiserverCPU = 1.8
apiserverMem = 2200 * (1024 * 1024)
controllerCPU = 0.5
controllerMem = 300 * (1024 * 1024)
schedulerCPU = 0.4
schedulerMem = 150 * (1024 * 1024)
}
}
constraints := make(map[string]framework.ResourceConstraint)
constraints["fluentd-elasticsearch"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 250 * (1024 * 1024),
}
constraints["elasticsearch-logging"] = framework.ResourceConstraint{
CPUConstraint: 2,
// TODO: bring it down to 750MB again, when we lower Kubelet verbosity level. I.e. revert #19164
MemoryConstraint: 5000 * (1024 * 1024),
}
constraints["heapster"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 1800 * (1024 * 1024),
}
constraints["kibana-logging"] = framework.ResourceConstraint{
CPUConstraint: 0.2,
MemoryConstraint: 100 * (1024 * 1024),
}
constraints["kube-proxy"] = framework.ResourceConstraint{
CPUConstraint: 0.15,
MemoryConstraint: 30 * (1024 * 1024),
}
constraints["l7-lb-controller"] = framework.ResourceConstraint{
CPUConstraint: 0.15,
MemoryConstraint: 60 * (1024 * 1024),
}
constraints["influxdb"] = framework.ResourceConstraint{
CPUConstraint: 2,
MemoryConstraint: 500 * (1024 * 1024),
}
constraints["kube-apiserver"] = framework.ResourceConstraint{
CPUConstraint: apiserverCPU,
MemoryConstraint: apiserverMem,
}
constraints["kube-controller-manager"] = framework.ResourceConstraint{
CPUConstraint: controllerCPU,
MemoryConstraint: controllerMem,
}
constraints["kube-scheduler"] = framework.ResourceConstraint{
CPUConstraint: schedulerCPU,
MemoryConstraint: schedulerMem,
}
return constraints
}
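// Editorial sketch, not part of the upstream file: the kubemark branch above
// is a tiered lookup, which can equally be written as a table; the numbers
// below are copied from the apiserver tiers in the code above:
func kubemarkAPIServerConstraint(numNodes int) framework.ResourceConstraint {
    tiers := []struct {
        maxNodes int
        cpu      float64
        memMB    uint64
    }{
        {5, 0.35, 150},
        {100, 1.5, 1500},
        {500, 3.5, 3400},
        {1000, 5.5, 4000},
    }
    for _, t := range tiers {
        if numNodes <= t.maxNodes {
            return framework.ResourceConstraint{CPUConstraint: t.cpu, MemoryConstraint: t.memMB * 1024 * 1024}
        }
    }
    // Beyond the largest tier the code above leaves the limits effectively
    // unconstrained.
    return framework.ResourceConstraint{CPUConstraint: math.MaxFloat32, MemoryConstraint: math.MaxUint64}
}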
func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := testutils.NewPodStore(c, v1.NamespaceAll, label, fields.Everything())
defer podStore.Stop()
ticker := time.NewTicker(period)
defer ticker.Stop()
for {
select {
case <-ticker.C:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
case <-stopCh:
pods := podStore.List()
startupStatus := testutils.ComputeRCStartupStatus(pods, expectedPods)
framework.Logf(startupStatus.String("Density"))
return
}
}
}
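// Editorial sketch, not part of the upstream file: logPodStartupStatus follows
// the usual stoppable-ticker shape; the skeleton with the pod logic stripped
// out:
func tickUntilStopped(period time.Duration, stopCh <-chan struct{}, report func()) {
    ticker := time.NewTicker(period)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            report()
        case <-stopCh:
            report() // one final report, mirroring the function above
            return
        }
    }
}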
// runDensityTest will perform a density test and return the time it took for
// all pods to start
func runDensityTest(dtc DensityTestConfig) time.Duration {
defer GinkgoRecover()
// Create all secrets
for i := range dtc.SecretConfigs {
dtc.SecretConfigs[i].Run()
}
for i := range dtc.DaemonConfigs {
dtc.DaemonConfigs[i].Run()
}
// Start all replication controllers.
startTime := time.Now()
wg := sync.WaitGroup{}
wg.Add(len(dtc.Configs))
for i := range dtc.Configs {
config := dtc.Configs[i]
go func() {
defer GinkgoRecover()
// Call wg.Done() in defer to avoid blocking whole test
// in case of error from RunRC.
defer wg.Done()
framework.ExpectNoError(config.Run())
}()
}
logStopCh := make(chan struct{})
go logPodStartupStatus(dtc.ClientSet, dtc.PodCount, map[string]string{"type": "densityPod"}, dtc.PollInterval, logStopCh)
wg.Wait()
startupTime := time.Since(startTime)
close(logStopCh)
framework.Logf("E2E startup time for %d pods: %v", dtc.PodCount, startupTime)
framework.Logf("Throughput (pods/s) during cluster saturation phase: %v", float32(dtc.PodCount)/float32(startupTime/time.Second))
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string)
for _, pod := range podList.Items {
if pod.Namespace == api.NamespaceSystem {
systemPodAllocation[pod.Spec.NodeName] = append(systemPodAllocation[pod.Spec.NodeName], pod.Name)
} else {
pausePodAllocation[pod.Spec.NodeName]++
}
}
nodeNames := make([]string, 0)
for k := range pausePodAllocation {
nodeNames = append(nodeNames, k)
}
sort.Strings(nodeNames)
for _, node := range nodeNames {
framework.Logf("%v: %v pause pods, system pods: %v", node, pausePodAllocation[node], systemPodAllocation[node])
}
return startupTime
}
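// Editorial sketch, not part of the upstream file: a worked example of the
// throughput figure logged by runDensityTest. Note that startupTime is first
// truncated to whole seconds by the integer division:
func exampleThroughput() {
    podCount := 3000
    startupTime := 5 * time.Minute
    // 3000 pods / 300 s = 10 pods/s
    throughput := float32(podCount) / float32(startupTime/time.Second)
    fmt.Printf("throughput: %v pods/s\n", throughput)
}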
func cleanupDensityTest(dtc DensityTestConfig) {
defer GinkgoRecover()
By("Deleting created Collections")
// We explicitly delete all pods to have API calls necessary for deletion accounted in metrics.
for i := range dtc.Configs {
name := dtc.Configs[i].GetName()
namespace := dtc.Configs[i].GetNamespace()
kind := dtc.Configs[i].GetKind()
if framework.TestContext.GarbageCollectorEnabled && kindSupportsGarbageCollector(kind) {
By(fmt.Sprintf("Cleaning up only the %v, garbage collector will clean up the pods", kind))
err := framework.DeleteResourceAndWaitForGC(dtc.ClientSet, kind, namespace, name)
framework.ExpectNoError(err)
} else {
By(fmt.Sprintf("Cleaning up the %v and pods", kind))
err := framework.DeleteResourceAndPods(dtc.ClientSet, dtc.InternalClientset, kind, namespace, name)
framework.ExpectNoError(err)
}
}
// Delete all secrets
for i := range dtc.SecretConfigs {
dtc.SecretConfigs[i].Stop()
}
for i := range dtc.DaemonConfigs {
framework.ExpectNoError(framework.DeleteResourceAndPods(
dtc.ClientSet,
dtc.InternalClientset,
extensions.Kind("DaemonSet"),
dtc.DaemonConfigs[i].Namespace,
dtc.DaemonConfigs[i].Name,
))
}
}
// This test suite can take a long time to run, and can affect or be affected by other tests.
// So by default it is added to the ginkgo.skip list (see driver.go).
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or the ginkgo.focus flag.
// IMPORTANT: This test is designed to work on large (>= 100 node) clusters. On smaller ones,
// results will not be representative of control-plane performance, as we start hitting
// limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() {
var c clientset.Interface
var nodeCount int
var name string
var additionalPodsPrefix string
var ns string
var uuid string
var e2eStartupTime time.Duration
var totalPods int
var nodeCpuCapacity int64
var nodeMemCapacity int64
var nodes *v1.NodeList
var masters sets.String
// Gathers data prior to framework namespace teardown
AfterEach(func() {
saturationThreshold := time.Duration(totalPods/MinPodsPerSecondThroughput) * time.Second
if saturationThreshold < MinSaturationThreshold {
saturationThreshold = MinSaturationThreshold
}
Expect(e2eStartupTime).NotTo(BeNumerically(">", saturationThreshold))
saturationData := framework.SaturationTime{
TimeToSaturate: e2eStartupTime,
NumberOfNodes: nodeCount,
NumberOfPods: totalPods,
Throughput: float32(totalPods) / float32(e2eStartupTime/time.Second),
}
framework.Logf("Cluster saturation time: %s", framework.PrettyPrintJSON(saturationData))
// Verify latency metrics.
highLatencyRequests, err := framework.HighLatencyRequests(c)
framework.ExpectNoError(err)
Expect(highLatencyRequests).NotTo(BeNumerically(">", 0), "There should be no high-latency requests")
// Verify scheduler metrics.
// TODO: Reset metrics at the beginning of the test.
// We should do something similar to how we do it for APIserver.
if err = framework.VerifySchedulerLatency(c); err != nil {
framework.Logf("Warning: Scheduler latency not calculated, %v", err)
}
})
// Explicitly put here, to delete namespace at the end of the test
// (after measuring latency metrics, etc.).
f := framework.NewDefaultFramework("density")
f.NamespaceDeletionTimeout = time.Hour
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
masters, nodes = framework.GetMasterAndWorkerNodesOrDie(c)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
nodeCpuCapacity = nodes.Items[0].Status.Allocatable.Cpu().MilliValue()
nodeMemCapacity = nodes.Items[0].Status.Allocatable.Memory().Value()
// Terminating a namespace (deleting the remaining objects from it, which
// generally means events) can affect the current run. Thus we wait for all
// terminating namespaces to be fully deleted before starting this test.
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
uuid = string(utiluuid.NewUUID())
framework.ExpectNoError(framework.ResetMetrics(c))
framework.ExpectNoError(os.Mkdir(fmt.Sprintf(framework.TestContext.OutputDir+"/%s", uuid), 0777))
framework.Logf("Listing nodes for easy debugging:\n")
for _, node := range nodes.Items {
var internalIP, externalIP string
for _, address := range node.Status.Addresses {
if address.Type == v1.NodeInternalIP {
internalIP = address.Address
}
if address.Type == v1.NodeExternalIP {
externalIP = address.Address
}
}
framework.Logf("Name: %v, clusterIP: %v, externalIP: %v", node.ObjectMeta.Name, internalIP, externalIP)
}
})
type Density struct {
// Controls if e2e latency tests should be run (they are slow)
runLatencyTest bool
podsPerNode int
// Controls how often the apiserver is polled for pods
interval time.Duration
// What kind of resource we should be creating. Default: ReplicationController
kind schema.GroupKind
secretsPerPod int
daemonsPerNode int
}
densityTests := []Density{
// TODO: Expose runLatencyTest as ginkgo flag.
{podsPerNode: 3, runLatencyTest: false, kind: api.Kind("ReplicationController")},
{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController")},
{podsPerNode: 50, runLatencyTest: false, kind: api.Kind("ReplicationController")},
{podsPerNode: 95, runLatencyTest: true, kind: api.Kind("ReplicationController")},
{podsPerNode: 100, runLatencyTest: false, kind: api.Kind("ReplicationController")},
// Tests for other resource types:
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment")},
{podsPerNode: 30, runLatencyTest: true, kind: batch.Kind("Job")},
// Test scheduling when daemons are preset
{podsPerNode: 30, runLatencyTest: true, kind: api.Kind("ReplicationController"), daemonsPerNode: 2},
// Test with secrets
{podsPerNode: 30, runLatencyTest: true, kind: extensions.Kind("Deployment"), secretsPerPod: 2},
}
for _, testArg := range densityTests {
feature := "ManualPerformance"
switch testArg.podsPerNode {
case 30:
if testArg.kind == api.Kind("ReplicationController") && testArg.daemonsPerNode == 0 && testArg.secretsPerPod == 0 {
feature = "Performance"
}
case 95:
feature = "HighDensityPerformance"
}
name := fmt.Sprintf("[Feature:%s] should allow starting %d pods per node using %v with %v secrets and %v daemons",
feature,
testArg.podsPerNode,
testArg.kind,
testArg.secretsPerPod,
testArg.daemonsPerNode,
)
itArg := testArg
It(name, func() {
nodePreparer := framework.NewE2ETestNodePreparer(
f.ClientSet,
[]testutils.CountToStrategy{{Count: nodeCount, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
)
framework.ExpectNoError(nodePreparer.PrepareNodes())
defer nodePreparer.CleanupNodes()
podsPerNode := itArg.podsPerNode
if podsPerNode == 30 {
f.AddonResourceConstraints = density30AddonResourceVerifier(nodeCount)
}
totalPods = (podsPerNode - itArg.daemonsPerNode) * nodeCount
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
// nodeCountPerNamespace and CreateNamespaces are defined in load.go
numberOfCollections := (nodeCount + nodeCountPerNamespace - 1) / nodeCountPerNamespace
namespaces, err := CreateNamespaces(f, numberOfCollections, fmt.Sprintf("density-%v", testArg.podsPerNode))
framework.ExpectNoError(err)
configs := make([]testutils.RunObjectConfig, numberOfCollections)
secretConfigs := make([]*testutils.SecretConfig, 0, numberOfCollections*itArg.secretsPerPod)
// Since all RCs are created at the same time, timeout for each config
// has to assume that it will be run at the very end.
podThroughput := 20
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go
clients, internalClients, err := createClients(numberOfCollections)
framework.ExpectNoError(err)
for i := 0; i < numberOfCollections; i++ {
nsName := namespaces[i].Name
secretNames := []string{}
for j := 0; j < itArg.secretsPerPod; j++ {
secretName := fmt.Sprintf("density-secret-%v-%v", i, j)
secretConfigs = append(secretConfigs, &testutils.SecretConfig{
Content: map[string]string{"foo": "bar"},
Client: clients[i],
Name: secretName,
Namespace: nsName,
LogFunc: framework.Logf,
})
secretNames = append(secretNames, secretName)
}
name := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
baseConfig := &testutils.RCConfig{
Client: clients[i],
InternalClient: internalClients[i],
Image: framework.GetPauseImageName(f.ClientSet),
Name: name,
Namespace: nsName,
Labels: map[string]string{"type": "densityPod"},
PollInterval: DensityPollInterval,
Timeout: timeout,
PodStatusFile: fileHndl,
Replicas: (totalPods + numberOfCollections - 1) / numberOfCollections,
CpuRequest: nodeCpuCapacity / 100,
MemRequest: nodeMemCapacity / 100,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
LogFunc: framework.Logf,
SecretNames: secretNames,
}
switch itArg.kind {
case api.Kind("ReplicationController"):
configs[i] = baseConfig
case extensions.Kind("ReplicaSet"):
configs[i] = &testutils.ReplicaSetConfig{RCConfig: *baseConfig}
case extensions.Kind("Deployment"):
configs[i] = &testutils.DeploymentConfig{RCConfig: *baseConfig}
case batch.Kind("Job"):
configs[i] = &testutils.JobConfig{RCConfig: *baseConfig}
default:
framework.Failf("Unsupported kind: %v", itArg.kind)
}
}
dConfig := DensityTestConfig{
ClientSet: f.ClientSet,
InternalClientset: f.InternalClientset,
Configs: configs,
PodCount: totalPods,
PollInterval: DensityPollInterval,
kind: itArg.kind,
SecretConfigs: secretConfigs,
}
for i := 0; i < itArg.daemonsPerNode; i++ {
dConfig.DaemonConfigs = append(dConfig.DaemonConfigs,
&testutils.DaemonConfig{
Client: f.ClientSet,
Name: fmt.Sprintf("density-daemon-%v", i),
Namespace: f.Namespace.Name,
LogFunc: framework.Logf,
})
}
e2eStartupTime = runDensityTest(dConfig)
if itArg.runLatencyTest {
By("Scheduling additional Pods to measure startup latencies")
createTimes := make(map[string]metav1.Time)
nodeNames := make(map[string]string)
scheduleTimes := make(map[string]metav1.Time)
runTimes := make(map[string]metav1.Time)
watchTimes := make(map[string]metav1.Time)
var mutex sync.Mutex
checkPod := func(p *v1.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = metav1.Now()
createTimes[p.Name] = p.CreationTimestamp
nodeNames[p.Name] = p.Spec.NodeName
var startTime metav1.Time
for _, cs := range p.Status.ContainerStatuses {
if cs.State.Running != nil {
if startTime.Before(cs.State.Running.StartedAt) {
startTime = cs.State.Running.StartedAt
}
}
}
if startTime != metav1.NewTime(time.Time{}) {
runTimes[p.Name] = startTime
} else {
framework.Failf("Pod %v is reported to be running, but none of its containers is", p.Name)
}
}
}
}
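// Editor's note: runTimes takes the pod's run time to be the latest
// StartedAt among its running containers; if the pod reports Running but
// none of its containers is, the test fails above.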
additionalPodsPrefix = "density-latency-pod"
stopCh := make(chan struct{})
latencyPodStores := make([]cache.Store, len(namespaces))
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
obj, err := c.Core().Pods(nsName).List(options)
return runtime.Object(obj), err
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
return c.Core().Pods(nsName).Watch(options)
},
},
&v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
p, ok := obj.(*v1.Pod)
if !ok {
framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*v1.Pod)
if !ok {
framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
},
)
latencyPodStores[i] = latencyPodsStore
go controller.Run(stopCh)
}
// Create some additional pods with throughput ~5 pods/sec.
var wg sync.WaitGroup
wg.Add(nodeCount)
// Explicitly set requests here.
// With requests set, the scheduler's resource-based spreading priority takes
// effect: each pod scheduled onto a node makes that node less attractive,
// which spreads the latency pods more evenly across nodes.
cpuRequest := *resource.NewMilliQuantity(nodeCpuCapacity/5, resource.DecimalSI)
memRequest := *resource.NewQuantity(nodeMemCapacity/5, resource.DecimalSI)
if podsPerNode > 30 {
// This is to make them schedulable on high-density tests
// (e.g. 100 pods/node kubemark).
cpuRequest = *resource.NewMilliQuantity(0, resource.DecimalSI)
memRequest = *resource.NewQuantity(0, resource.DecimalSI)
}
rcNameToNsMap := map[string]string{}
for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i)
nsName := namespaces[i%len(namespaces)].Name
rcNameToNsMap[name] = nsName
go createRunningPodFromRC(&wg, c, name, nsName, framework.GetPauseImageName(f.ClientSet), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond)
}
wg.Wait()
By("Waiting for all Pods begin observed by the watch...")
waitTimeout := 10 * time.Minute
for start := time.Now(); len(watchTimes) < nodeCount; time.Sleep(10 * time.Second) {
if time.Since(start) < waitTimeout {
framework.Failf("Timeout reached waiting for all Pods being observed by the watch.")
}
}
close(stopCh)
nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
}
// Report duplicates once, after all stores have been accumulated.
for node, count := range nodeToLatencyPods {
if count > 1 {
framework.Logf("%d latency pods scheduled on %s", count, node)
}
}
for i := 0; i < len(namespaces); i++ {
nsName := namespaces[i].Name
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": nsName,
"source": v1.DefaultSchedulerName,
}.AsSelector().String()
options := v1.ListOptions{FieldSelector: selector}
schedEvents, err := c.Core().Events(nsName).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
for _, event := range schedEvents.Items {
if event.InvolvedObject.Name == k {
scheduleTimes[k] = event.FirstTimestamp
break
}
}
}
}
scheduleLag := make([]framework.PodLatencyData, 0)
startupLag := make([]framework.PodLatencyData, 0)
watchLag := make([]framework.PodLatencyData, 0)
schedToWatchLag := make([]framework.PodLatencyData, 0)
e2eLag := make([]framework.PodLatencyData, 0)
for name, create := range createTimes {
sched, ok := scheduleTimes[name]
if !ok {
framework.Logf("Failed to find schedule time for %v", name)
}
Expect(ok).To(Equal(true))
run, ok := runTimes[name]
if !ok {
framework.Logf("Failed to find run time for %v", name)
}
Expect(ok).To(Equal(true))
watch, ok := watchTimes[name]
if !ok {
framework.Logf("Failed to find watch time for %v", name)
}
Expect(ok).To(Equal(true))
node, ok := nodeNames[name]
if !ok {
framework.Logf("Failed to find node for %v", name)
}
Expect(ok).To(Equal(true))
scheduleLag = append(scheduleLag, framework.PodLatencyData{Name: name, Node: node, Latency: sched.Time.Sub(create.Time)})
startupLag = append(startupLag, framework.PodLatencyData{Name: name, Node: node, Latency: run.Time.Sub(sched.Time)})
watchLag = append(watchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(run.Time)})
schedToWatchLag = append(schedToWatchLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(sched.Time)})
e2eLag = append(e2eLag, framework.PodLatencyData{Name: name, Node: node, Latency: watch.Time.Sub(create.Time)})
}
sort.Sort(framework.LatencySlice(scheduleLag))
sort.Sort(framework.LatencySlice(startupLag))
sort.Sort(framework.LatencySlice(watchLag))
sort.Sort(framework.LatencySlice(schedToWatchLag))
sort.Sort(framework.LatencySlice(e2eLag))
framework.PrintLatencies(scheduleLag, "worst schedule latencies")
framework.PrintLatencies(startupLag, "worst run-after-schedule latencies")
framework.PrintLatencies(watchLag, "worst watch latencies")
framework.PrintLatencies(schedToWatchLag, "worst scheduled-to-end total latencies")
framework.PrintLatencies(e2eLag, "worst e2e total latencies")
// Test whether e2e pod startup time is acceptable.
podStartupLatency := framework.PodStartupLatency{Latency: framework.ExtractLatencyMetrics(e2eLag)}
framework.ExpectNoError(framework.VerifyPodStartupLatency(podStartupLatency))
framework.LogSuspiciousLatency(startupLag, e2eLag, nodeCount, c)
By("Removing additional replication controllers")
deleteRC := func(i int) {
defer GinkgoRecover()
name := additionalPodsPrefix + "-" + strconv.Itoa(i+1)
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(c, rcNameToNsMap[name], name))
}
workqueue.Parallelize(16, nodeCount, deleteRC)
}
cleanupDensityTest(dConfig)
})
}
// Calculate the total number of pods from each node's max-pods capacity
It("[Feature:ManualPerformance] should allow running maximum capacity pods on nodes", func() {
totalPods = 0
for _, n := range nodes.Items {
totalPods += int(n.Status.Capacity.Pods().Value())
}
totalPods -= framework.WaitForStableCluster(c, masters)
fileHndl, err := os.Create(fmt.Sprintf(framework.TestContext.OutputDir+"/%s/pod_states.csv", uuid))
framework.ExpectNoError(err)
defer fileHndl.Close()
collectionCount := 1
configs := make([]testutils.RunObjectConfig, collectionCount)
podsPerCollection := totalPods / collectionCount
for i := 0; i < collectionCount; i++ {
if i == collectionCount-1 {
podsPerCollection += int(math.Mod(float64(totalPods), float64(collectionCount)))
}
name = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
configs[i] = &testutils.RCConfig{Client: c,
Image: framework.GetPauseImageName(f.ClientSet),
Name: name,
Namespace: ns,
Labels: map[string]string{"type": "densityPod"},
PollInterval: DensityPollInterval,
PodStatusFile: fileHndl,
Replicas: podsPerCollection,
MaxContainerFailures: &MaxContainerFailures,
Silent: true,
LogFunc: framework.Logf,
}
}
dConfig := DensityTestConfig{
ClientSet: f.ClientSet,
Configs: configs,
PodCount: totalPods,
PollInterval: DensityPollInterval,
}
e2eStartupTime = runDensityTest(dConfig)
cleanupDensityTest(dConfig)
})
})
func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,
"name": name,
}
rc := &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
},
Spec: v1.ReplicationControllerSpec{
Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
Selector: labels,
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: cpuRequest,
v1.ResourceMemory: memRequest,
},
},
},
},
DNSPolicy: v1.DNSDefault,
},
},
},
}
_, err := c.Core().ReplicationControllers(ns).Create(rc)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForControlledPodsRunning(c, ns, name, api.Kind("ReplicationController")))
framework.Logf("Found pod '%s' running", name)
}
func kindSupportsGarbageCollector(kind schema.GroupKind) bool {
return kind != extensions.Kind("Deployment") && kind != batch.Kind("Job")
}
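Editor's note: the latency bookkeeping above reduces to collecting (name, node, duration) records, sorting them, and reading off percentiles; the framework's extraction follows a ceil(len*q/100) indexing convention. Below is a minimal, self-contained sketch of that convention; podLatency and percentileOf are stand-ins, not the framework's actual API.

package main

import (
	"fmt"
	"sort"
	"time"
)

type podLatency struct {
	Name    string
	Latency time.Duration
}

// percentileOf indexes a sorted slice with the ceil(len*q/100)-1 convention.
func percentileOf(sorted []podLatency, q int) time.Duration {
	idx := (len(sorted)*q+99)/100 - 1
	if idx < 0 {
		idx = 0
	}
	return sorted[idx].Latency
}

func main() {
	data := []podLatency{
		{"pod-a", 1200 * time.Millisecond},
		{"pod-b", 300 * time.Millisecond},
		{"pod-c", 800 * time.Millisecond},
		{"pod-d", 2500 * time.Millisecond},
	}
	sort.Slice(data, func(i, j int) bool { return data[i].Latency < data[j].Latency })
	for _, q := range []int{50, 90, 99} {
		fmt.Printf("P%d: %v\n", q, percentileOf(data, q))
	}
}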

1417
vendor/k8s.io/kubernetes/test/e2e/deployment.go generated vendored Normal file

File diff suppressed because it is too large

294
vendor/k8s.io/kubernetes/test/e2e/disruption.go generated vendored Normal file
View file

@ -0,0 +1,294 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
"k8s.io/client-go/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
)
// schedulingTimeout is deliberately long: we sometimes need to wait that long
// to be confident that something ordinary (a pod getting scheduled and moving
// into Ready) has had enough time to happen.
const (
bigClusterSize = 7
schedulingTimeout = 10 * time.Minute
timeout = 60 * time.Second
)
var _ = framework.KubeDescribe("DisruptionController", func() {
f := framework.NewDefaultFramework("disruption")
var ns string
var cs *kubernetes.Clientset
BeforeEach(func() {
cs = f.StagingClient
ns = f.Namespace.Name
})
It("should create a PodDisruptionBudget", func() {
createPodDisruptionBudgetOrDie(cs, ns, intstr.FromString("1%"))
})
It("should update PodDisruptionBudget status", func() {
createPodDisruptionBudgetOrDie(cs, ns, intstr.FromInt(2))
createPodsOrDie(cs, ns, 3)
waitForPodsOrDie(cs, ns, 3)
// Since disruptionAllowed starts out 0, if we see it ever become positive,
// that means the controller is working.
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
pdb, err := cs.Policy().PodDisruptionBudgets(ns).Get("foo", metav1.GetOptions{})
if err != nil {
return false, err
}
return pdb.Status.PodDisruptionsAllowed > 0, nil
})
Expect(err).NotTo(HaveOccurred())
})
evictionCases := []struct {
description string
minAvailable intstr.IntOrString
podCount int
replicaSetSize int32
shouldDeny bool
exclusive bool
skipForBigClusters bool
}{
{
description: "no PDB",
minAvailable: intstr.FromString(""),
podCount: 1,
shouldDeny: false,
}, {
description: "too few pods, absolute",
minAvailable: intstr.FromInt(2),
podCount: 2,
shouldDeny: true,
}, {
description: "enough pods, absolute",
minAvailable: intstr.FromInt(2),
podCount: 3,
shouldDeny: false,
}, {
description: "enough pods, replicaSet, percentage",
minAvailable: intstr.FromString("90%"),
replicaSetSize: 10,
exclusive: false,
shouldDeny: false,
}, {
description: "too few pods, replicaSet, percentage",
minAvailable: intstr.FromString("90%"),
replicaSetSize: 10,
exclusive: true,
shouldDeny: true,
// This test assumes that there are fewer than replicaSetSize nodes in the cluster.
skipForBigClusters: true,
},
}
for i := range evictionCases {
c := evictionCases[i]
expectation := "should allow an eviction"
if c.shouldDeny {
expectation = "should not allow an eviction"
}
It(fmt.Sprintf("evictions: %s => %s", c.description, expectation), func() {
if c.skipForBigClusters {
framework.SkipUnlessNodeCountIsAtMost(bigClusterSize - 1)
}
createPodsOrDie(cs, ns, c.podCount)
if c.replicaSetSize > 0 {
createReplicaSetOrDie(cs, ns, c.replicaSetSize, c.exclusive)
}
if c.minAvailable.String() != "" {
createPodDisruptionBudgetOrDie(cs, ns, c.minAvailable)
}
// Locate a running pod.
var pod v1.Pod
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
podList, err := cs.Pods(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
for i := range podList.Items {
if podList.Items[i].Status.Phase == v1.PodRunning {
pod = podList.Items[i]
return true, nil
}
}
return false, nil
})
Expect(err).NotTo(HaveOccurred())
e := &policy.Eviction{
ObjectMeta: v1.ObjectMeta{
Name: pod.Name,
Namespace: ns,
},
}
if c.shouldDeny {
// Since disruptionAllowed starts out false, wait at least 60s hoping that
// this gives the controller enough time to have truly set the status.
time.Sleep(timeout)
err = cs.Pods(ns).Evict(e)
Expect(err).Should(MatchError("Cannot evict pod as it would violate the pod's disruption budget."))
} else {
// Only wait for running pods in the "allow" case
// because one of shouldDeny cases relies on the
// replicaSet not fitting on the cluster.
waitForPodsOrDie(cs, ns, c.podCount+int(c.replicaSetSize))
// Since disruptionAllowed starts out false, if an eviction is ever allowed,
// that means the controller is working.
err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
err = cs.Pods(ns).Evict(e)
return err == nil, nil
})
Expect(err).NotTo(HaveOccurred())
}
})
}
})
func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) {
pdb := policy.PodDisruptionBudget{
ObjectMeta: v1.ObjectMeta{
Name: "foo",
Namespace: ns,
},
Spec: policy.PodDisruptionBudgetSpec{
Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
MinAvailable: minAvailable,
},
}
_, err := cs.Policy().PodDisruptionBudgets(ns).Create(&pdb)
Expect(err).NotTo(HaveOccurred())
}
func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
for i := 0; i < n; i++ {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod-%d", i),
Namespace: ns,
Labels: map[string]string{"foo": "bar"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.4",
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
}
_, err := cs.Pods(ns).Create(pod)
framework.ExpectNoError(err, "Creating pod %q in namespace %q", pod.Name, ns)
}
}
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.Core().Pods(ns).List(v1.ListOptions{LabelSelector: "foo=bar"})
if err != nil {
return false, err
}
if pods == nil {
return false, fmt.Errorf("pods is nil")
}
if len(pods.Items) < n {
framework.Logf("pods: %v < %v", len(pods.Items), n)
return false, nil
}
ready := 0
for i := 0; i < n; i++ {
if pods.Items[i].Status.Phase == v1.PodRunning {
ready++
}
}
if ready < n {
framework.Logf("running pods: %v < %v", ready, n)
return false, nil
}
return true, nil
})
framework.ExpectNoError(err, "Waiting for pods in namespace %q to be ready", ns)
}
func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, exclusive bool) {
container := v1.Container{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.4",
}
if exclusive {
container.Ports = []v1.ContainerPort{
{HostPort: 5555, ContainerPort: 5555},
}
}
rs := &extensions.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
Name: "rs",
Namespace: ns,
},
Spec: extensions.ReplicaSetSpec{
Replicas: &size,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"foo": "bar"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{container},
},
},
},
}
_, err := cs.Extensions().ReplicaSets(ns).Create(rs)
framework.ExpectNoError(err, "Creating replica set %q in namespace %q", rs.Name, ns)
}
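Editor's note: nearly every check in this file rests on a single idiom: wrap a fallible check in a wait.ConditionFunc and let wait.PollImmediate drive the retries. Below is a minimal, self-contained sketch of that idiom; the isEvictable closure is a hypothetical stand-in for the eviction checks above.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// isEvictable is a hypothetical stand-in for "does the PDB allow an
	// eviction yet?"; returning (false, nil) means "keep polling".
	isEvictable := func() (bool, error) {
		attempts++
		return attempts >= 3, nil
	}
	// The condition runs once immediately, then every 100ms, for up to 5s.
	err := wait.PollImmediate(100*time.Millisecond, 5*time.Second, isEvictable)
	if err != nil {
		// wait.ErrWaitTimeout when the 5s budget is exhausted.
		fmt.Println("condition never became true:", err)
		return
	}
	fmt.Printf("condition satisfied after %d attempts\n", attempts)
}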

513
vendor/k8s.io/kubernetes/test/e2e/dns.go generated vendored Normal file
View file

@ -0,0 +1,513 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
)
const dnsTestPodHostName = "dns-querier-1"
const dnsTestServiceName = "dns-test-service"
var dnsServiceLabelSelector = labels.Set{
"k8s-app": "kube-dns",
"kubernetes.io/cluster-service": "true",
}.AsSelector()
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotation bool) *v1.Pod {
dnsPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: "dns-test-" + string(uuid.NewUUID()),
Namespace: namespace,
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: "results",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
// TODO: Consider scraping logs instead of running a webserver.
{
Name: "webserver",
Image: "gcr.io/google_containers/test-webserver:e2e",
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
},
},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
{
Name: "querier",
Image: "gcr.io/google_containers/dnsutils:e2e",
Command: []string{"sh", "-c", wheezyProbeCmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
{
Name: "jessie-querier",
Image: "gcr.io/google_containers/jessie-dnsutils:e2e",
Command: []string{"sh", "-c", jessieProbeCmd},
VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
},
},
},
},
},
}
if useAnnotation {
dnsPod.ObjectMeta.Annotations = map[string]string{
pod.PodHostnameAnnotation: dnsTestPodHostName,
pod.PodSubdomainAnnotation: dnsTestServiceName,
}
} else {
dnsPod.Spec.Hostname = dnsTestPodHostName
dnsPod.Spec.Subdomain = dnsTestServiceName
}
return dnsPod
}
func createProbeCommand(namesToResolve []string, hostEntries []string, ptrLookupIP string, fileNamePrefix, namespace string) (string, []string) {
fileNames := make([]string, 0, len(namesToResolve)*2)
probeCmd := "for i in `seq 1 600`; do "
for _, name := range namesToResolve {
// Resolve by TCP and UDP DNS. Use $$(...) rather than $(...) because
// $(...) would be expanded by Kubernetes' variable substitution; the
// command must reach the shell as a literal (better safe than sorry).
lookup := "A"
if strings.HasPrefix(name, "_") {
lookup = "SRV"
}
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
fileName = fmt.Sprintf("%s_tcp@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s %s)" && echo OK > /results/%s;`, name, lookup, fileName)
}
for _, name := range hostEntries {
fileName := fmt.Sprintf("%s_hosts@%s", fileNamePrefix, name)
fileNames = append(fileNames, fileName)
probeCmd += fmt.Sprintf(`test -n "$$(getent hosts %s)" && echo OK > /results/%s;`, name, fileName)
}
podARecByUDPFileName := fmt.Sprintf("%s_udp@PodARecord", fileNamePrefix)
podARecByTCPFileName := fmt.Sprintf("%s_tcp@PodARecord", fileNamePrefix)
probeCmd += fmt.Sprintf(`podARec=$$(hostname -i| awk -F. '{print $$1"-"$$2"-"$$3"-"$$4".%s.pod.cluster.local"}');`, namespace)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search $${podARec} A)" && echo OK > /results/%s;`, podARecByTCPFileName)
fileNames = append(fileNames, podARecByUDPFileName)
fileNames = append(fileNames, podARecByTCPFileName)
if len(ptrLookupIP) > 0 {
ptrLookup := fmt.Sprintf("%s.in-addr.arpa.", strings.Join(reverseArray(strings.Split(ptrLookupIP, ".")), "."))
ptrRecByUDPFileName := fmt.Sprintf("%s_udp@PTR", ptrLookupIP)
ptrRecByTCPFileName := fmt.Sprintf("%s_tcp@PTR", ptrLookupIP)
probeCmd += fmt.Sprintf(`test -n "$$(dig +notcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByUDPFileName)
probeCmd += fmt.Sprintf(`test -n "$$(dig +tcp +noall +answer +search %s PTR)" && echo OK > /results/%s;`, ptrLookup, ptrRecByTCPFileName)
fileNames = append(fileNames, ptrRecByUDPFileName)
fileNames = append(fileNames, ptrRecByTCPFileName)
}
probeCmd += "sleep 1; done"
return probeCmd, fileNames
}
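// Editor's note: an illustrative expansion (reformatted across lines here;
// the generated command is a single line). For one name "kubernetes.default"
// with prefix "wheezy", after Kubernetes turns $$ back into $, the loop is
// roughly:
//
//	for i in `seq 1 600`; do
//	test -n "$(dig +notcp +noall +answer +search kubernetes.default A)" && echo OK > /results/wheezy_udp@kubernetes.default;
//	test -n "$(dig +tcp +noall +answer +search kubernetes.default A)" && echo OK > /results/wheezy_tcp@kubernetes.default;
//	... pod A-record and optional PTR probes ...
//	sleep 1; done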
// createTargetedProbeCommand returns a command line that performs a DNS lookup for a specific record type
func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePrefix string) (string, string) {
fileName := fmt.Sprintf("%s_udp@%s", fileNamePrefix, nameToResolve)
probeCmd := fmt.Sprintf("dig +short +tries=12 +norecurse %s %s > /results/%s", nameToResolve, lookup, fileName)
return probeCmd, fileName
}
func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
assertFilesContain(fileNames, fileDir, pod, client, false, "")
}
func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
var failed []string
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
failed = []string{}
subResourceProxyAvailable, err := framework.ServerVersionGTE(framework.SubResourcePodProxyVersion, client.Discovery())
if err != nil {
return false, err
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
var contents []byte
for _, fileName := range fileNames {
if subResourceProxyAvailable {
contents, err = client.Core().RESTClient().Get().
Context(ctx).
Namespace(pod.Namespace).
Resource("pods").
SubResource("proxy").
Name(pod.Name).
Suffix(fileDir, fileName).
Do().Raw()
} else {
contents, err = client.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Resource("pods").
Namespace(pod.Namespace).
Name(pod.Name).
Suffix(fileDir, fileName).
Do().Raw()
}
if err != nil {
if ctx.Err() != nil {
framework.Failf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
} else {
framework.Logf("Unable to read %s from pod %s: %v", fileName, pod.Name, err)
}
failed = append(failed, fileName)
} else if check && strings.TrimSpace(string(contents)) != expected {
framework.Logf("File %s from pod %s contains '%s' instead of '%s'", fileName, pod.Name, string(contents), expected)
failed = append(failed, fileName)
}
}
if len(failed) == 0 {
return true, nil
}
framework.Logf("Lookups using %s failed for: %v\n", pod.Name, failed)
return false, nil
}))
Expect(len(failed)).To(Equal(0))
}
func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
}
// Try to find results for each expected name.
By("looking for the results for each expected name from probers")
assertFilesExist(fileNames, "results", pod, f.ClientSet)
// TODO: probe from the host, too.
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("retrieving the pod")
pod, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod %s: %v", pod.Name, err)
}
// Try to find the expected value for each expected name.
By("looking for the results for each expected name from probers")
assertFilesContain(fileNames, "results", pod, f.ClientSet, true, value)
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
func verifyDNSPodIsRunning(f *framework.Framework) {
systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem)
By("Waiting for DNS Service to be Running")
options := v1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()}
dnsPods, err := systemClient.List(options)
if err != nil {
framework.Failf("Failed to list all dns service pods")
}
if len(dnsPods.Items) < 1 {
framework.Failf("No pods match the label selector %v", dnsServiceLabelSelector.String())
}
pod := dnsPods.Items[0]
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod))
}
func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
headlessService := &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: serviceName,
},
Spec: v1.ServiceSpec{
Selector: selector,
},
}
if externalName != "" {
headlessService.Spec.Type = v1.ServiceTypeExternalName
headlessService.Spec.ExternalName = externalName
} else {
headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
}
if isHeadless {
headlessService.Spec.ClusterIP = "None"
}
return headlessService
}
func reverseArray(arr []string) []string {
for i := 0; i < len(arr)/2; i++ {
j := len(arr) - i - 1
arr[i], arr[j] = arr[j], arr[i]
}
return arr
}
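// Editor's note: reverseArray reverses in place and returns the same slice;
// e.g. reverseArray([]string{"10", "0", "2", "1"}) yields {"1", "2", "0", "10"},
// which createProbeCommand joins into the PTR name "1.2.0.10.in-addr.arpa.".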
var _ = framework.KubeDescribe("DNS", func() {
f := framework.NewDefaultFramework("dns")
It("should provide DNS for the cluster [Conformance]", func() {
// All the names we need to be able to resolve.
// TODO: Spin up a separate test service and test that dns works for that service.
namesToResolve := []string{
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster.local",
"google.com",
}
// Added due to #8512. This is critical for GCE and GKE deployments.
if framework.ProviderIs("gce", "gke") {
namesToResolve = append(namesToResolve, "metadata")
}
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", dnsTestPodHostName, dnsTestServiceName, f.Namespace.Name)
hostEntries := []string{hostFQDN, dnsTestPodHostName}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostEntries, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostEntries, "", "jessie", f.Namespace.Name)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
It("should provide DNS for services [Conformance]", func() {
// Create a test headless service.
By("Creating a test headless service")
testServiceSelector := map[string]string{
"dns-test": "true",
}
headlessService := createServiceSpec(dnsTestServiceName, "", true, testServiceSelector)
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test headless service")
defer GinkgoRecover()
f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()
regularService := createServiceSpec("test-service-2", "", false, testServiceSelector)
regularService, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(regularService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test service")
defer GinkgoRecover()
f.ClientSet.Core().Services(f.Namespace.Name).Delete(regularService.Name, nil)
}()
// All the names we need to be able to resolve.
// TODO: Create more endpoints and ensure that multiple A records are returned
// for headless service.
namesToResolve := []string{
fmt.Sprintf("%s", headlessService.Name),
fmt.Sprintf("%s.%s", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("%s.%s.svc", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("_http._tcp.%s.%s.svc", headlessService.Name, f.Namespace.Name),
fmt.Sprintf("_http._tcp.%s.%s.svc", regularService.Name, f.Namespace.Name),
}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, nil, regularService.Spec.ClusterIP, "jessie", f.Namespace.Name)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, false)
pod.ObjectMeta.Labels = testServiceSelector
validateDNSResults(f, pod, append(wheezyFileNames, jessieFileNames...))
})
It("should provide DNS for pods for Hostname and Subdomain Annotation", func() {
// Create a test headless service.
By("Creating a test headless service")
testServiceSelector := map[string]string{
"dns-test-hostname-attribute": "true",
}
serviceName := "dns-test-service-2"
podHostname := "dns-querier-2"
headlessService := createServiceSpec(serviceName, "", true, testServiceSelector)
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(headlessService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test headless service")
defer GinkgoRecover()
f.ClientSet.Core().Services(f.Namespace.Name).Delete(headlessService.Name, nil)
}()
hostFQDN := fmt.Sprintf("%s.%s.%s.svc.cluster.local", podHostname, serviceName, f.Namespace.Name)
hostNames := []string{hostFQDN, podHostname}
namesToResolve := []string{hostFQDN}
wheezyProbeCmd, wheezyFileNames := createProbeCommand(namesToResolve, hostNames, "", "wheezy", f.Namespace.Name)
jessieProbeCmd, jessieFileNames := createProbeCommand(namesToResolve, hostNames, "", "jessie", f.Namespace.Name)
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
pod1.ObjectMeta.Labels = testServiceSelector
pod1.ObjectMeta.Annotations = map[string]string{
pod.PodHostnameAnnotation: podHostname,
pod.PodSubdomainAnnotation: serviceName,
}
validateDNSResults(f, pod1, append(wheezyFileNames, jessieFileNames...))
})
It("should provide DNS for ExternalName services", func() {
// Create a test ExternalName service.
By("Creating a test externalName service")
serviceName := "dns-test-service-3"
externalNameService := createServiceSpec(serviceName, "foo.example.com", false, nil)
_, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(externalNameService)
Expect(err).NotTo(HaveOccurred())
defer func() {
By("deleting the test externalName service")
defer GinkgoRecover()
f.ClientSet.Core().Services(f.Namespace.Name).Delete(externalNameService.Name, nil)
}()
hostFQDN := fmt.Sprintf("%s.%s.svc.cluster.local", serviceName, f.Namespace.Name)
wheezyProbeCmd, wheezyFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName := createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a pod to probe DNS")
pod1 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
validateTargetedProbeOutput(f, pod1, []string{wheezyFileName, jessieFileName}, "foo.example.com.")
// Test changing the externalName field
By("changing the externalName to bar.example.com")
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com"
})
Expect(err).NotTo(HaveOccurred())
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "CNAME", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a second pod to probe DNS")
pod2 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
validateTargetedProbeOutput(f, pod2, []string{wheezyFileName, jessieFileName}, "bar.example.com.")
// Test changing type from ExternalName to ClusterIP
By("changing the service to type=ClusterIP")
_, err = framework.UpdateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.ClusterIP = "127.1.2.3"
s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
})
Expect(err).NotTo(HaveOccurred())
wheezyProbeCmd, wheezyFileName = createTargetedProbeCommand(hostFQDN, "A", "wheezy")
jessieProbeCmd, jessieFileName = createTargetedProbeCommand(hostFQDN, "A", "jessie")
By("Running these commands on wheezy: " + wheezyProbeCmd + "\n")
By("Running these commands on jessie: " + jessieProbeCmd + "\n")
// Run a pod which probes DNS and exposes the results by HTTP.
By("creating a third pod to probe DNS")
pod3 := createDNSPod(f.Namespace.Name, wheezyProbeCmd, jessieProbeCmd, true)
validateTargetedProbeOutput(f, pod3, []string{wheezyFileName, jessieFileName}, "127.1.2.3")
})
})

348
vendor/k8s.io/kubernetes/test/e2e/dns_autoscaling.go generated vendored Normal file
View file

@ -0,0 +1,348 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"math"
"reflect"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
DNSdefaultTimeout = 5 * time.Minute
ClusterAddonLabelKey = "k8s-app"
DNSLabelName = "kube-dns"
DNSAutoscalerLabelName = "kube-dns-autoscaler"
)
var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
f := framework.NewDefaultFramework("dns-autoscaling")
var c clientset.Interface
var previousParams map[string]string
var originDNSReplicasCount int
DNSParams_1 := DNSParamsLinear{
nodesPerReplica: 1,
}
DNSParams_2 := DNSParamsLinear{
nodesPerReplica: 2,
}
DNSParams_3 := DNSParamsLinear{
nodesPerReplica: 3,
coresPerReplica: 3,
}
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
c = f.ClientSet
Expect(len(framework.GetReadySchedulableNodesOrDie(c).Items)).NotTo(BeZero())
By("Collecting original replicas count and DNS scaling params")
var err error
originDNSReplicasCount, err = getDNSReplicas(c)
Expect(err).NotTo(HaveOccurred())
pcm, err := fetchDNSScalingConfigMap(c)
Expect(err).NotTo(HaveOccurred())
previousParams = pcm.Data
})
// This test is separated because it is slow and needs to run serially.
// It takes around 5 minutes to run on a 4-node cluster.
It("[Serial] [Slow] kube-dns-autoscaler should scale kube-dns pods when cluster size changed", func() {
By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring intial dns autoscaling parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
By("Wait for number of running and ready kube-dns pods recover")
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
_, err := framework.WaitForPodsWithLabelRunningReady(c, api.NamespaceSystem, label, originDNSReplicasCount, DNSdefaultTimeout)
Expect(err).NotTo(HaveOccurred())
}()
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
originalSizes := make(map[string]int)
sum := 0
for _, mig := range strings.Split(framework.TestContext.CloudConfig.NodeInstanceGroup, ",") {
size, err := GroupSize(mig)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Initial size of %s: %d", mig, size))
originalSizes[mig] = size
sum += size
}
By("Manually increase cluster size")
increasedSize := 0
increasedSizes := make(map[string]int)
for key, val := range originalSizes {
increasedSizes[key] = val + 1
increasedSize += increasedSizes[key]
}
setMigSizes(increasedSizes)
Expect(WaitForClusterSizeFunc(c,
func(size int) bool { return size == increasedSize }, scaleUpTimeout)).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("Restoring cluster size")
setMigSizes(originalSizes)
Expect(framework.WaitForClusterSize(c, sum, scaleDownTimeout)).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
})
It("kube-dns-autoscaler should scale kube-dns pods in both nonfaulty and faulty scenarios", func() {
By("Replace the dns autoscaling parameters with testing parameters")
err := updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
defer func() {
By("Restoring intial dns autoscaling parameters")
Expect(updateDNSScalingConfigMap(c, packDNSScalingConfigMap(previousParams))).NotTo(HaveOccurred())
}()
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear := getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should scale kube-dns based on changed parameters ---")
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_3)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_3)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should re-create scaling parameters with default value when parameters got deleted ---")
By("Delete the ConfigMap for autoscaler")
err = deleteDNSScalingConfigMap(c)
Expect(err).NotTo(HaveOccurred())
By("Wait for the ConfigMap got re-created")
configMap, err := waitForDNSConfigMapCreated(c, DNSdefaultTimeout)
Expect(err).NotTo(HaveOccurred())
By("Check the new created ConfigMap got the same data as we have")
Expect(reflect.DeepEqual(previousParams, configMap.Data)).To(Equal(true))
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_2)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_2)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
By("--- Scenario: should recover after autoscaler pod got deleted ---")
By("Delete the autoscaler pod for kube-dns")
Expect(deleteDNSAutoscalerPod(c)).NotTo(HaveOccurred())
By("Replace the dns autoscaling parameters with another testing parameters")
err = updateDNSScalingConfigMap(c, packDNSScalingConfigMap(packLinearParams(&DNSParams_1)))
Expect(err).NotTo(HaveOccurred())
By("Wait for kube-dns scaled to expected number")
getExpectReplicasLinear = getExpectReplicasFuncLinear(c, &DNSParams_1)
Expect(waitForDNSReplicasSatisfied(c, getExpectReplicasLinear, DNSdefaultTimeout)).NotTo(HaveOccurred())
})
})
type DNSParamsLinear struct {
nodesPerReplica float64
coresPerReplica float64
min int
max int
}
type getExpectReplicasFunc func(c clientset.Interface) int
func getExpectReplicasFuncLinear(c clientset.Interface, params *DNSParamsLinear) getExpectReplicasFunc {
return func(c clientset.Interface) int {
var replicasFromNodes float64
var replicasFromCores float64
nodes := framework.GetReadySchedulableNodesOrDie(c).Items
if params.nodesPerReplica > 0 {
replicasFromNodes = math.Ceil(float64(len(nodes)) / params.nodesPerReplica)
}
if params.coresPerReplica > 0 {
replicasFromCores = math.Ceil(float64(getSchedulableCores(nodes)) / params.coresPerReplica)
}
return int(math.Max(1.0, math.Max(replicasFromNodes, replicasFromCores)))
}
}
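// Editor's note, a worked example: with DNSParams_3 (nodesPerReplica=3,
// coresPerReplica=3) on a 4-node cluster exposing 8 schedulable cores, the
// expected count is max(1, max(ceil(4/3), ceil(8/3))) = max(1, max(2, 3)) = 3.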
func getSchedulableCores(nodes []v1.Node) int64 {
var sc resource.Quantity
for _, node := range nodes {
if !node.Spec.Unschedulable {
sc.Add(node.Status.Capacity[v1.ResourceCPU])
}
}
scInt64, scOk := sc.AsInt64()
if !scOk {
framework.Logf("Unable to compute integer values of schedulable cores in the cluster")
return 0
}
return scInt64
}
func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.Core().ConfigMaps(api.NamespaceSystem).Get(DNSAutoscalerLabelName, metav1.GetOptions{})
if err != nil {
return nil, err
}
return cm, nil
}
func deleteDNSScalingConfigMap(c clientset.Interface) error {
if err := c.Core().ConfigMaps(api.NamespaceSystem).Delete(DNSAutoscalerLabelName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap deleted.")
return nil
}
func packLinearParams(params *DNSParamsLinear) map[string]string {
paramsMap := make(map[string]string)
paramsMap["linear"] = fmt.Sprintf("{\"nodesPerReplica\": %v,\"coresPerReplica\": %v,\"min\": %v,\"max\": %v}",
params.nodesPerReplica,
params.coresPerReplica,
params.min,
params.max)
return paramsMap
}
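// Editor's note: for DNSParams_1 this produces
// {"linear": "{\"nodesPerReplica\": 1,\"coresPerReplica\": 0,\"min\": 0,\"max\": 0}"}.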
func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
configMap := v1.ConfigMap{}
configMap.ObjectMeta.Name = DNSAutoscalerLabelName
configMap.ObjectMeta.Namespace = api.NamespaceSystem
configMap.Data = params
return &configMap
}
func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.Core().ConfigMaps(api.NamespaceSystem).Update(configMap)
if err != nil {
return err
}
framework.Logf("DNS autoscaling ConfigMap updated.")
return nil
}
func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSLabelName}))
listOpts := v1.ListOptions{LabelSelector: label.String()}
deployments, err := c.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
if err != nil {
return 0, err
}
if len(deployments.Items) != 1 {
return 0, fmt.Errorf("expected 1 DNS deployment, got %v", len(deployments.Items))
}
deployment := deployments.Items[0]
return int(*(deployment.Spec.Replicas)), nil
}
func deleteDNSAutoscalerPod(c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(api.NamespaceSystem).List(listOpts)
if err != nil {
return err
}
if len(pods.Items) != 1 {
return fmt.Errorf("expected 1 autoscaler pod, got %v", len(pods.Items))
}
podName := pods.Items[0].Name
if err := c.Core().Pods(api.NamespaceSystem).Delete(podName, nil); err != nil {
return err
}
framework.Logf("DNS autoscaling pod %v deleted.", podName)
return nil
}
func waitForDNSReplicasSatisfied(c clientset.Interface, getExpected getExpectReplicasFunc, timeout time.Duration) (err error) {
var current int
var expected int
framework.Logf("Waiting up to %v for kube-dns to reach expected replicas", timeout)
condition := func() (bool, error) {
current, err = getDNSReplicas(c)
if err != nil {
return false, err
}
expected = getExpected(c)
if current != expected {
framework.Logf("Replicas not as expected: got %v, expected %v", current, expected)
return false, nil
}
return true, nil
}
if err = wait.Poll(2*time.Second, timeout, condition); err != nil {
return fmt.Errorf("err waiting for DNS replicas to satisfy %v, got %v: %v", expected, current, err)
}
framework.Logf("kube-dns reaches expected replicas: %v", expected)
return nil
}
func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(c)
if err != nil {
return false, nil
}
return true, nil
}
if err = wait.Poll(time.Second, timeout, condition); err != nil {
return nil, fmt.Errorf("err waiting for DNS autoscaling ConfigMap got re-created: %v", err)
}
return configMap, nil
}

315
vendor/k8s.io/kubernetes/test/e2e/dns_configmap.go generated vendored Normal file
View file

@ -0,0 +1,315 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
type dnsConfigMapTest struct {
f *framework.Framework
c clientset.Interface
ns string
name string
labels []string
cm *v1.ConfigMap
fedMap map[string]string
isValid bool
dnsPod *v1.Pod
utilPod *v1.Pod
utilService *v1.Service
}
var _ = framework.KubeDescribe("DNS config map", func() {
test := &dnsConfigMapTest{
f: framework.NewDefaultFramework("dns-config-map"),
ns: "kube-system",
name: "kube-dns",
}
BeforeEach(func() {
test.c = test.f.ClientSet
})
It("should be able to change configuration", func() {
test.run()
})
})
func (t *dnsConfigMapTest) init() {
By("Finding a DNS pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kube-dns"}))
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := t.f.ClientSet.Core().Pods("kube-system").List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Items)).Should(BeNumerically(">=", 1))
t.dnsPod = &pods.Items[0]
framework.Logf("Using DNS pod: %v", t.dnsPod.Name)
}
func (t *dnsConfigMapTest) run() {
t.init()
defer t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil)
t.createUtilPod()
defer t.deleteUtilPod()
t.validate()
t.labels = []string{"abc", "ghi"}
valid1 := map[string]string{"federations": t.labels[0] + "=def"}
valid1m := map[string]string{t.labels[0]: "def"}
valid2 := map[string]string{"federations": t.labels[1] + "=xyz"}
valid2m := map[string]string{t.labels[1]: "xyz"}
invalid := map[string]string{"federations": "invalid.map=xyz"}
By("empty -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate()
By("valid1 -> valid2")
t.setConfigMap(&v1.ConfigMap{Data: valid2}, valid2m, true)
t.validate()
By("valid2 -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate()
By("invalid -> valid1")
t.setConfigMap(&v1.ConfigMap{Data: valid1}, valid1m, true)
t.validate()
By("valid1 -> deleted")
t.deleteConfigMap()
t.validate()
By("deleted -> invalid")
t.setConfigMap(&v1.ConfigMap{Data: invalid}, nil, false)
t.validate()
}
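// For reference, an illustrative sketch (not part of the vendored test) of the
// kube-dns ConfigMap shape that run() cycles through above: "federations" holds
// label=domain pairs, and a label containing a dot (e.g. "invalid.map") is not a
// valid DNS label, so the test treats that payload as invalid.
var exampleFederationsConfigMap = &v1.ConfigMap{
	ObjectMeta: v1.ObjectMeta{Namespace: "kube-system", Name: "kube-dns"},
	Data:       map[string]string{"federations": "abc=def"},
}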
func (t *dnsConfigMapTest) validate() {
t.validateFederation()
}
func (t *dnsConfigMapTest) validateFederation() {
federations := t.fedMap
if len(federations) == 0 {
By(fmt.Sprintf("Validating federation labels %v do not exist", t.labels))
for _, label := range t.labels {
var federationDNS = fmt.Sprintf("e2e-dns-configmap.%s.%s.svc.cluster.local.",
t.f.Namespace.Name, label)
predicate := func(actual []string) bool {
return len(actual) == 0
}
t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
}
} else {
for label := range federations {
var federationDNS = fmt.Sprintf("%s.%s.%s.svc.cluster.local.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name, label)
var localDNS = fmt.Sprintf("%s.%s.svc.cluster.local.",
t.utilService.ObjectMeta.Name, t.f.Namespace.Name)
// Check local mapping. Checking a remote mapping requires
// creating an arbitrary DNS record which is not possible at the
// moment.
By(fmt.Sprintf("Validating federation record %v", label))
predicate := func(actual []string) bool {
for _, v := range actual {
if v == localDNS {
return true
}
}
return false
}
t.checkDNSRecord(federationDNS, predicate, wait.ForeverTestTimeout)
}
}
}
func (t *dnsConfigMapTest) checkDNSRecord(name string, predicate func([]string) bool, timeout time.Duration) {
var actual []string
err := wait.PollImmediate(
time.Duration(1)*time.Second,
timeout,
func() (bool, error) {
actual = t.runDig(name)
if predicate(actual) {
return true, nil
}
return false, nil
})
if err != nil {
framework.Logf("dig result did not match: %#v after %v",
actual, timeout)
}
}
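// Illustrative sketch (not part of the vendored test): checkDNSRecord accepts an
// arbitrary predicate over the dig answers; expectSingleAnswer is a hypothetical
// helper in the same predicate style as validateFederation above.
func (t *dnsConfigMapTest) expectSingleAnswer(name, expected string) {
	t.checkDNSRecord(name, func(answers []string) bool {
		return len(answers) == 1 && answers[0] == expected
	}, wait.ForeverTestTimeout)
}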
// runDig queries for `dnsName` and returns the list of responses.
func (t *dnsConfigMapTest) runDig(dnsName string) []string {
cmd := []string{
"/usr/bin/dig",
"+short",
"@" + t.dnsPod.Status.PodIP,
"-p", "10053", dnsName,
}
stdout, stderr, err := t.f.ExecWithOptions(framework.ExecOptions{
Command: cmd,
Namespace: t.f.Namespace.Name,
PodName: t.utilPod.Name,
ContainerName: "util",
CaptureStdout: true,
CaptureStderr: true,
})
framework.Logf("Running dig: %v, stdout: %q, stderr: %q, err: %v",
cmd, stdout, stderr, err)
if stdout == "" {
return []string{}
} else {
return strings.Split(stdout, "\n")
}
}
func (t *dnsConfigMapTest) setConfigMap(cm *v1.ConfigMap, fedMap map[string]string, isValid bool) {
if isValid {
t.cm = cm
t.fedMap = fedMap
}
t.isValid = isValid
cm.ObjectMeta.Namespace = t.ns
cm.ObjectMeta.Name = t.name
options := v1.ListOptions{
FieldSelector: fields.Set{
"metadata.namespace": t.ns,
"metadata.name": t.name,
}.AsSelector().String(),
}
cmList, err := t.c.Core().ConfigMaps(t.ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(cmList.Items) == 0 {
By(fmt.Sprintf("Creating the ConfigMap (%s:%s) %+v", t.ns, t.name, *cm))
_, err := t.c.Core().ConfigMaps(t.ns).Create(cm)
Expect(err).NotTo(HaveOccurred())
} else {
By(fmt.Sprintf("Updating the ConfigMap (%s:%s) to %+v", t.ns, t.name, *cm))
_, err := t.c.Core().ConfigMaps(t.ns).Update(cm)
Expect(err).NotTo(HaveOccurred())
}
}
func (t *dnsConfigMapTest) deleteConfigMap() {
By(fmt.Sprintf("Deleting the ConfigMap (%s:%s)", t.ns, t.name))
t.cm = nil
t.isValid = false
err := t.c.Core().ConfigMaps(t.ns).Delete(t.name, nil)
Expect(err).NotTo(HaveOccurred())
}
func (t *dnsConfigMapTest) createUtilPod() {
// Actual port number doesn't matter, it just needs to exist.
const servicePort = 10101
t.utilPod = &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
},
ObjectMeta: v1.ObjectMeta{
Namespace: t.f.Namespace.Name,
Labels: map[string]string{"app": "e2e-dns-configmap"},
GenerateName: "e2e-dns-configmap-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "util",
Image: "gcr.io/google_containers/dnsutils:e2e",
Command: []string{"sleep", "10000"},
Ports: []v1.ContainerPort{
{ContainerPort: servicePort, Protocol: "TCP"},
},
},
},
},
}
var err error
t.utilPod, err = t.c.Core().Pods(t.f.Namespace.Name).Create(t.utilPod)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created pod %v", t.utilPod)
Expect(t.f.WaitForPodRunning(t.utilPod.Name)).NotTo(HaveOccurred())
t.utilService = &v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
},
ObjectMeta: v1.ObjectMeta{
Namespace: t.f.Namespace.Name,
Name: "e2e-dns-configmap",
},
Spec: v1.ServiceSpec{
Selector: map[string]string{"app": "e2e-dns-configmap"},
Ports: []v1.ServicePort{
{
Protocol: "TCP",
Port: servicePort,
TargetPort: intstr.FromInt(servicePort),
},
},
},
}
t.utilService, err = t.c.Core().Services(t.f.Namespace.Name).Create(t.utilService)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Created service %v", t.utilService)
}
func (t *dnsConfigMapTest) deleteUtilPod() {
podClient := t.c.Core().Pods(t.f.Namespace.Name)
if err := podClient.Delete(t.utilPod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Logf("Delete of pod %v:%v failed: %v",
t.utilPod.Namespace, t.utilPod.Name, err)
}
}

View file

@ -0,0 +1,3 @@
{
"provider":"local"
}

299
vendor/k8s.io/kubernetes/test/e2e/e2e.go generated vendored Normal file
View file

@ -0,0 +1,299 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"path"
"sync"
"testing"
"time"
"github.com/golang/glog"
"github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/config"
"github.com/onsi/ginkgo/reporters"
"github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/runtime"
runtimeutils "k8s.io/apimachinery/pkg/util/runtime"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/logs"
commontest "k8s.io/kubernetes/test/e2e/common"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
testutils "k8s.io/kubernetes/test/utils"
)
const (
// imagePrePullingTimeout is the time we wait for the e2e-image-puller
// static pods to pull the list of seeded images. If they don't pull
// images within this time we simply log their output and carry on
// with the tests.
imagePrePullingTimeout = 5 * time.Minute
)
var (
cloudConfig = &framework.TestContext.CloudConfig
)
// setupProviderConfig validates and sets up cloudConfig based on framework.TestContext.Provider.
func setupProviderConfig() error {
switch framework.TestContext.Provider {
case "":
glog.Info("The --provider flag is not set. Treating as a conformance test. Some tests may not be run.")
case "gce", "gke":
var err error
framework.Logf("Fetching cloud provider for %q\r\n", framework.TestContext.Provider)
zone := framework.TestContext.CloudConfig.Zone
region, err := gcecloud.GetGCERegion(zone)
if err != nil {
return fmt.Errorf("error parsing GCE/GKE region from zone %q: %v", zone, err)
}
managedZones := []string{zone} // Only single-zone for now
cloudConfig.Provider, err = gcecloud.CreateGCECloud(framework.TestContext.CloudConfig.ProjectID, region, zone, managedZones, "" /* networkUrl */, nil /* nodeTags */, "" /* nodeInstancePrefix */, nil /* tokenSource */, false /* useMetadataServer */)
if err != nil {
return fmt.Errorf("Error building GCE/GKE provider: %v", err)
}
case "aws":
if cloudConfig.Zone == "" {
return fmt.Errorf("gce-zone must be specified for AWS")
}
}
return nil
}
// There are certain operations we only want to run once per overall test invocation
// (such as deleting old namespaces, or verifying that all system pods are running).
// Because of the way Ginkgo runs tests in parallel, we must use SynchronizedBeforeSuite
// to ensure that these operations only run on the first parallel Ginkgo node.
//
// This function takes two parameters: one function which runs on only the first Ginkgo node,
// returning an opaque byte array, and then a second function which runs on all Ginkgo nodes,
// accepting the byte array.
var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// Run only on Ginkgo node 1
if err := setupProviderConfig(); err != nil {
framework.Failf("Failed to setup provider config: %v", err)
}
c, err := framework.LoadClientset()
if err != nil {
glog.Fatal("Error loading client: ", err)
}
// Delete any namespaces except default and kube-system. This ensures no
// lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart {
deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, v1.NamespaceDefault, federationapi.FederationNamespaceSystem})
if err != nil {
framework.Failf("Error deleting orphaned namespaces: %v", err)
}
glog.Infof("Waiting for deletion of the following namespaces: %v", deleted)
if err := framework.WaitForNamespacesDeleted(c, deleted, framework.NamespaceCleanupTimeout); err != nil {
framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err)
}
}
// In large clusters we may get to this point but still have a bunch
// of nodes without Routes created. Since this would make a node
// unschedulable, we need to wait until all of them are schedulable.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
// Ensure all pods are running and ready before starting tests (otherwise,
// cluster infrastructure pods that are being pulled or started can block
// test pods from running, and tests that ensure all pods are running and
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels, true); err != nil {
framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf)
runKubernetesServiceTestContainer(c, v1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
if err := framework.WaitForPodsSuccess(c, api.NamespaceSystem, framework.ImagePullerLabels, imagePrePullingTimeout); err != nil {
// There is no guarantee that image pulling will succeed within imagePrePullingTimeout,
// and we don't even run the image puller on all platforms (including GKE).
// We wait for it so we get an indication of failures in the logs, and to
// maximize benefit of image pre-pulling.
framework.Logf("WARNING: Image pulling pods failed to enter success in %v: %v", imagePrePullingTimeout, err)
}
// Dump the output of the nethealth containers only once per run
if framework.TestContext.DumpLogsOnFailure {
framework.Logf("Dumping network health container logs from all nodes")
framework.LogContainersInPodsWithLabels(c, api.NamespaceSystem, framework.ImagePullerLabels, "nethealth", framework.Logf)
}
// Reference common test to make the import valid.
commontest.CurrentSuite = commontest.E2E
return nil
}, func(data []byte) {
// Run on all Ginkgo nodes
if cloudConfig.Provider == nil {
if err := setupProviderConfig(); err != nil {
framework.Failf("Failed to setup provider config: %v", err)
}
}
})
type CleanupActionHandle *int
var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
cleanupActions[p] = fn
return p
}
// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
delete(cleanupActions, p)
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
list := []func(){}
func() {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for _, fn := range cleanupActions {
list = append(list, fn)
}
}()
// Run unlocked.
for _, fn := range list {
fn()
}
}
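// Illustrative sketch (not part of the vendored suite): the intended usage of the
// cleanup-action registry above. exampleWithCleanup is a hypothetical caller that
// registers an emergency teardown and withdraws it after cleaning up normally.
func exampleWithCleanup() {
	handle := AddCleanupAction(func() {
		framework.Logf("emergency teardown running")
	})
	defer RemoveCleanupAction(handle)
	// ... test body; if the suite is torn down early, SynchronizedAfterSuite
	// runs the registered action via RunCleanupActions.
}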
// Similar to SynchronizedBeforeSuite, we want to run some operations only once (such as collecting cluster logs).
// Here, the order of functions is reversed; first, the function which runs everywhere,
// and then the function that only runs on the first Ginkgo node.
var _ = ginkgo.SynchronizedAfterSuite(func() {
// Run on all Ginkgo nodes
RunCleanupActions()
}, func() {
// Run only on Ginkgo node 1
if framework.TestContext.ReportDir != "" {
framework.CoreDump(framework.TestContext.ReportDir)
}
})
// TestE2E checks configuration parameters (specified through flags) and then runs
// E2E tests using the Ginkgo runner.
// If a "report directory" is specified, one or more JUnit test reports will be
// generated in this directory, and cluster logs will also be saved.
// This function is called on each Ginkgo node in parallel mode.
func RunE2ETests(t *testing.T) {
runtimeutils.ReallyCrash = true
logs.InitLogs()
defer logs.FlushLogs()
gomega.RegisterFailHandler(ginkgo.Fail)
// Disable skipped tests unless they are explicitly requested.
if config.GinkgoConfig.FocusString == "" && config.GinkgoConfig.SkipString == "" {
config.GinkgoConfig.SkipString = `\[Flaky\]|\[Feature:.+\]`
}
// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins
var r []ginkgo.Reporter
if framework.TestContext.ReportDir != "" {
// TODO: we should probably only be trying to create this directory once
// rather than once-per-Ginkgo-node.
if err := os.MkdirAll(framework.TestContext.ReportDir, 0755); err != nil {
glog.Errorf("Failed creating report directory: %v", err)
} else {
r = append(r, reporters.NewJUnitReporter(path.Join(framework.TestContext.ReportDir, fmt.Sprintf("junit_%v%02d.xml", framework.TestContext.ReportPrefix, config.GinkgoConfig.ParallelNode))))
}
}
glog.Infof("Starting e2e run %q on Ginkgo node %d", framework.RunId, config.GinkgoConfig.ParallelNode)
ginkgo.RunSpecsWithDefaultAndCustomReporters(t, "Kubernetes e2e suite", r)
}
func podFromManifest(filename string) (*v1.Pod, error) {
var pod v1.Pod
framework.Logf("Parsing pod from %v", filename)
data := generated.ReadOrDie(filename)
json, err := utilyaml.ToJSON(data)
if err != nil {
return nil, err
}
if err := runtime.DecodeInto(api.Codecs.UniversalDecoder(), json, &pod); err != nil {
return nil, err
}
return &pod, nil
}
// Run a test container to try and contact the Kubernetes api-server from a pod, wait for it
// to flip to Ready, log its output and delete it.
func runKubernetesServiceTestContainer(c clientset.Interface, ns string) {
path := "test/images/clusterapi-tester/pod.yaml"
p, err := podFromManifest(path)
if err != nil {
framework.Logf("Failed to parse clusterapi-tester from manifest %v: %v", path, err)
return
}
p.Namespace = ns
if _, err := c.Core().Pods(ns).Create(p); err != nil {
framework.Logf("Failed to create %v: %v", p.Name, err)
return
}
defer func() {
if err := c.Core().Pods(ns).Delete(p.Name, nil); err != nil {
framework.Logf("Failed to delete pod %v: %v", p.Name, err)
}
}()
timeout := 5 * time.Minute
if err := framework.WaitForPodCondition(c, ns, p.Name, "clusterapi-tester", timeout, testutils.PodRunningReady); err != nil {
framework.Logf("Pod %v took longer than %v to enter running/ready: %v", p.Name, timeout, err)
return
}
logs, err := framework.GetPodLogs(c, ns, p.Name, p.Spec.Containers[0].Name)
if err != nil {
framework.Logf("Failed to retrieve logs from %v: %v", p.Name, err)
} else {
framework.Logf("Output of clusterapi-tester:\n%v", logs)
}
}

31
vendor/k8s.io/kubernetes/test/e2e/e2e_test.go generated vendored Normal file
View file

@ -0,0 +1,31 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"testing"
"k8s.io/kubernetes/test/e2e/framework"
)
func init() {
framework.ViperizeFlags()
}
func TestE2E(t *testing.T) {
RunE2ETests(t)
}

43
vendor/k8s.io/kubernetes/test/e2e/empty.go generated vendored Normal file
View file

@ -0,0 +1,43 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("Empty [Feature:Empty]", func() {
f := framework.NewDefaultFramework("empty")
BeforeEach(func() {
c := f.ClientSet
ns := f.Namespace.Name
// TODO: respect --allow-notready-nodes flag in those functions.
framework.ExpectNoError(framework.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout))
framework.WaitForAllNodesHealthy(c, time.Minute)
err := framework.CheckTestingNSDeletedExcept(c, ns)
framework.ExpectNoError(err)
})
It("does nothing", func() {})
})

390
vendor/k8s.io/kubernetes/test/e2e/empty_dir_wrapper.go generated vendored Normal file
View file

@ -0,0 +1,390 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
"fmt"
"strconv"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// These numbers are obtained empirically.
// If you make them too low, you'll get flaky
// tests instead of failing ones if the race bug reappears.
// If you make volume counts or pod counts too high,
// the tests may fail because mounting configmap/git_repo
// volumes is not very fast and the tests may time out
// waiting for pods to become Running.
// And of course, the higher the numbers, the slower the tests.
wrappedVolumeRaceConfigMapVolumeCount = 50
wrappedVolumeRaceConfigMapPodCount = 5
wrappedVolumeRaceConfigMapIterationCount = 3
wrappedVolumeRaceGitRepoVolumeCount = 50
wrappedVolumeRaceGitRepoPodCount = 5
wrappedVolumeRaceGitRepoIterationCount = 3
wrappedVolumeRaceRCNamePrefix = "wrapped-volume-race-"
)
var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
f := framework.NewDefaultFramework("emptydir-wrapper")
It("should not conflict [Volume]", func() {
name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
volumeName := "secret-volume"
volumeMountPath := "/etc/secret-volume"
secret := &v1.Secret{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Data: map[string][]byte{
"data-1": []byte("value-1\n"),
},
}
var err error
if secret, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret); err != nil {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
gitVolumeName := "git-volume"
gitVolumeMountPath := "/etc/git-volume"
gitURL, gitRepo, gitCleanup := createGitServer(f)
defer gitCleanup()
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
Spec: v1.PodSpec{
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: gitVolumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
},
},
},
},
Containers: []v1.Container{
{
Name: "secret-test",
Image: "gcr.io/google_containers/test-webserver:e2e",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
ReadOnly: true,
},
{
Name: gitVolumeName,
MountPath: gitVolumeMountPath,
},
},
},
},
},
}
pod = f.PodClient().CreateSync(pod)
defer func() {
By("Cleaning up the secret")
if err := f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, nil); err != nil {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
By("Cleaning up the git vol pod")
if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
}
}()
})
// The following two tests check for the problem fixed in #29641.
// In order to reproduce it you need to revert the fix, e.g. via
// git revert -n df1e925143daf34199b55ffb91d0598244888cce
// or
// curl -sL https://github.com/kubernetes/kubernetes/pull/29641.patch | patch -p1 -R
//
// After that these tests will fail because some of the pods
// they create never enter Running state.
//
// They need to be [Serial] and [Slow] because they try to induce
// the race by creating pods with many volumes and container volume mounts,
// which takes considerable time and may interfere with other tests.
//
// Probably should also try making tests for secrets and downwardapi,
// but these cases are harder because tmpfs-based emptyDir
// appears to be less prone to the race problem.
It("should not cause race condition when used for configmaps [Serial] [Slow] [Volume]", func() {
configMapNames := createConfigmapsForRace(f)
defer deleteConfigMaps(f, configMapNames)
volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
}
})
It("should not cause race condition when used for git_repo [Serial] [Slow] [Volume]", func() {
gitURL, gitRepo, cleanup := createGitServer(f)
defer cleanup()
volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
testNoWrappedVolumeRace(f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
}
})
})
func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
var err error
gitServerPodName := "git-server-" + string(uuid.NewUUID())
containerPort := 8000
labels := map[string]string{"name": gitServerPodName}
gitServerPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: gitServerPodName,
Labels: labels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "git-repo",
Image: "gcr.io/google_containers/fakegitserver:0.1",
ImagePullPolicy: "IfNotPresent",
Ports: []v1.ContainerPort{
{ContainerPort: int32(containerPort)},
},
},
},
},
}
f.PodClient().CreateSync(gitServerPod)
// Service cluster IP ("portal IP") and port
httpPort := 2345
gitServerSvc := &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "git-server-svc",
},
Spec: v1.ServiceSpec{
Selector: labels,
Ports: []v1.ServicePort{
{
Name: "http-portal",
Port: int32(httpPort),
TargetPort: intstr.FromInt(containerPort),
},
},
},
}
if gitServerSvc, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(gitServerSvc); err != nil {
framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
}
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
By("Cleaning up the git server pod")
if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
}
By("Cleaning up the git server svc")
if err := f.ClientSet.Core().Services(f.Namespace.Name).Delete(gitServerSvc.Name, nil); err != nil {
framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
}
}
}
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
volumeName := fmt.Sprintf("racey-git-repo-%d", i)
volumes = append(volumes, v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
},
},
})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
})
}
return
}
func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
configMapName := fmt.Sprintf("racey-configmap-%d", i)
configMapNames = append(configMapNames, configMapName)
configMap := &v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: configMapName,
},
Data: map[string]string{
"data-1": "value-1",
},
}
_, err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Create(configMap)
framework.ExpectNoError(err)
}
return
}
func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
By("Cleaning up the configMaps")
for _, configMapName := range configMapNames {
err := f.ClientSet.Core().ConfigMaps(f.Namespace.Name).Delete(configMapName, nil)
Expect(err).NotTo(HaveOccurred(), "unable to delete configMap %v", configMapName)
}
}
func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i, configMapName := range configMapNames {
volumeName := fmt.Sprintf("racey-configmap-%d", i)
volumes = append(volumes, v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "data-1",
},
},
},
},
})
volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/etc/config-%d", i),
})
}
return
}
func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
targetNode := nodeList.Items[0]
By("Creating RC which spawns configmap-volume pods")
affinity := &v1.Affinity{
NodeAffinity: &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{
{
MatchExpressions: []v1.NodeSelectorRequirement{
{
Key: "kubernetes.io/hostname",
Operator: v1.NodeSelectorOpIn,
Values: []string{targetNode.Name},
},
},
},
},
},
},
}
rc := &v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{
Name: rcName,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &podCount,
Selector: map[string]string{
"name": rcName,
},
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": rcName},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sleep", "10000"},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("10m"),
},
},
VolumeMounts: volumeMounts,
},
},
Affinity: affinity,
DNSPolicy: v1.DNSDefault,
Volumes: volumes,
},
},
},
}
_, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(rc)
Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
defer func() {
err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
framework.ExpectNoError(err)
}()
pods, err := framework.PodsCreated(f.ClientSet, f.Namespace.Name, rcName, podCount)
Expect(err).NotTo(HaveOccurred(), "error waiting for pods to be created")
By("Ensuring each pod is running")
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
}
err = f.WaitForPodRunning(pod.Name)
Expect(err).NotTo(HaveOccurred(), "Failed waiting for pod %s to enter running state", pod.Name)
}
}

138
vendor/k8s.io/kubernetes/test/e2e/etcd_failure.go generated vendored Normal file
View file

@ -0,0 +1,138 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {
f := framework.NewDefaultFramework("etcd-failure")
BeforeEach(func() {
// This test requires:
// - SSH
// - master access
// ... so the provider check should be identical to the intersection of
// providers that provide those capabilities.
framework.SkipUnlessProviderIs("gce")
Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet,
Name: "baz",
Namespace: f.Namespace.Name,
Image: framework.GetPauseImageName(f.ClientSet),
Replicas: 1,
})).NotTo(HaveOccurred())
})
It("should recover from network partition with master", func() {
etcdFailTest(
f,
"sudo iptables -A INPUT -p tcp --destination-port 2379 -j DROP",
"sudo iptables -D INPUT -p tcp --destination-port 2379 -j DROP",
)
})
It("should recover from SIGKILL", func() {
etcdFailTest(
f,
"pgrep etcd | xargs -I {} sudo kill -9 {}",
"echo 'do nothing. monit should restart etcd.'",
)
})
})
func etcdFailTest(f *framework.Framework, failCommand, fixCommand string) {
doEtcdFailure(failCommand, fixCommand)
checkExistingRCRecovers(f)
ServeImageOrFail(f, "basic", "gcr.io/google_containers/serve_hostname:v1.4")
}
// For the duration of etcdFailureDuration below, etcd is failed by executing
// failCommand on the master. After this duration, fixCommand is executed on the
// master and we go on to assert that etcd and the kubernetes components recover.
const etcdFailureDuration = 20 * time.Second
func doEtcdFailure(failCommand, fixCommand string) {
By("failing etcd")
masterExec(failCommand)
time.Sleep(etcdFailureDuration)
masterExec(fixCommand)
}
func masterExec(cmd string) {
result, err := framework.SSH(cmd, framework.GetMasterHost()+":22", framework.TestContext.Provider)
Expect(err).NotTo(HaveOccurred())
if result.Code != 0 {
framework.LogSSHResult(result)
framework.Failf("master exec command returned non-zero")
}
}
func checkExistingRCRecovers(f *framework.Framework) {
By("assert that the pre-existing replication controller recovers")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
rcSelector := labels.Set{"name": "baz"}.AsSelector()
By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := v1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
return false, nil
}
if len(pods.Items) == 0 {
return false, nil
}
for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}
framework.Logf("apiserver has recovered")
return true, nil
}))
By("waiting for replication controller to recover")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := v1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
return true, nil
}
}
return false, nil
}))
}

130
vendor/k8s.io/kubernetes/test/e2e/events.go generated vendored Normal file
View file

@ -0,0 +1,130 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"strconv"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Events", func() {
f := framework.NewDefaultFramework("events")
It("should be sent by kubelets and the scheduler about pods scheduling and running [Conformance]", func() {
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
By("creating the pod")
name := "send-events-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "p",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
},
}
By("submitting the pod to kubernetes")
defer func() {
By("deleting the pod")
podClient.Delete(pod.Name, nil)
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create pod: %v", err)
}
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Items)).To(Equal(1))
By("retrieving the pod")
podWithUid, err := podClient.Get(pod.Name, metav1.GetOptions{})
if err != nil {
framework.Failf("Failed to get pod: %v", err)
}
fmt.Printf("%+v\n", podWithUid)
var events *v1.EventList
// Check for scheduler event about the pod.
By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.namespace": f.Namespace.Name,
"source": v1.DefaultSchedulerName,
}.AsSelector().String()
options := v1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
if err != nil {
return false, err
}
if len(events.Items) > 0 {
fmt.Println("Saw scheduler event for our pod.")
return true, nil
}
return false, nil
}))
// Check for kubelet event about the pod.
By("checking for kubelet event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
selector := fields.Set{
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.kind": "Pod",
"involvedObject.namespace": f.Namespace.Name,
"source": "kubelet",
}.AsSelector().String()
options := v1.ListOptions{FieldSelector: selector}
events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options)
if err != nil {
return false, err
}
if len(events.Items) > 0 {
fmt.Println("Saw kubelet event for our pod.")
return true, nil
}
return false, nil
}))
})
})

View file

@ -0,0 +1,157 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
dnsReadyTimeout = time.Minute
)
const queryDnsPythonTemplate string = `
import socket
try:
socket.gethostbyname('%s')
print 'ok'
except:
print 'err'`
var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
f := framework.NewDefaultFramework("cluster-dns")
var c clientset.Interface
BeforeEach(func() {
c = f.ClientSet
})
It("should create pod that uses dns", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/cluster-dns", file)
}
// Contrary to the example, this test does not use contexts; for simplicity,
// namespaces are passed directly.
// Also, for simplicity, we don't use yamls with namespaces, but we
// create testing namespaces instead.
backendRcYaml := mkpath("dns-backend-rc.yaml")
backendRcName := "dns-backend"
backendSvcYaml := mkpath("dns-backend-service.yaml")
backendSvcName := "dns-backend"
backendPodName := "dns-backend"
frontendPodYaml := mkpath("dns-frontend-pod.yaml")
frontendPodName := "dns-frontend"
frontendPodContainerName := "dns-frontend"
podOutput := "Hello World!"
// we need two namespaces anyway, so let's forget about
// the one created in BeforeEach and create two new ones.
namespaces := []*v1.Namespace{nil, nil}
for i := range namespaces {
var err error
namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
Expect(err).NotTo(HaveOccurred())
}
for _, ns := range namespaces {
framework.RunKubectlOrDie("create", "-f", backendRcYaml, getNsCmdFlag(ns))
}
for _, ns := range namespaces {
framework.RunKubectlOrDie("create", "-f", backendSvcYaml, getNsCmdFlag(ns))
}
// wait for objects
for _, ns := range namespaces {
framework.WaitForControlledPodsRunning(c, ns.Name, backendRcName, api.Kind("ReplicationController"))
framework.WaitForService(c, ns.Name, backendSvcName, true, framework.Poll, framework.ServiceStartTimeout)
}
// It is not enough that the pods are Running: they may be marked running while
// the application itself has not finished initializing. Just query the application.
for _, ns := range namespaces {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred())
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
Expect(err).NotTo(HaveOccurred(), "waiting for all pods to respond")
framework.Logf("found %d backend pods responding in namespace %s", len(pods.Items), ns.Name)
err = framework.ServiceResponding(c, ns.Name, backendSvcName)
Expect(err).NotTo(HaveOccurred(), "waiting for the service to respond")
}
// Now another tricky part:
// It may happen that the service name is not yet in DNS.
// So if we start our pod, it will fail. We must make sure
// the name is already resolvable. So let's try to query DNS from
// the pod we have, until we find our service name.
// This complicated code may be removed if the pod itself retried after
// dns error or timeout.
// This code is probably unnecessary, but let's stay on the safe side.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 {
framework.Failf("no running pods found")
}
podName := pods.Items[0].Name
queryDns := fmt.Sprintf(queryDnsPythonTemplate, backendSvcName+"."+namespaces[0].Name)
_, err = framework.LookForStringInPodExec(namespaces[0].Name, podName, []string{"python", "-c", queryDns}, "ok", dnsReadyTimeout)
Expect(err).NotTo(HaveOccurred(), "waiting for output from pod exec")
updatedPodYaml := prepareResourceWithReplacedString(frontendPodYaml, "dns-backend.development.cluster.local", fmt.Sprintf("dns-backend.%s.svc.cluster.local", namespaces[0].Name))
// create a pod in each namespace
for _, ns := range namespaces {
framework.NewKubectlCommand("create", "-f", "-", getNsCmdFlag(ns)).WithStdinData(updatedPodYaml).ExecOrDie()
}
// wait until the pods have been scheduled, i.e. are not Pending anymore. Remember
// that we cannot wait for the pods to be running because our pods terminate by themselves.
for _, ns := range namespaces {
err := framework.WaitForPodNotPending(c, ns.Name, frontendPodName, "")
framework.ExpectNoError(err)
}
// wait for pods to print their result
for _, ns := range namespaces {
_, err := framework.LookForStringInLog(ns.Name, frontendPodName, frontendPodContainerName, podOutput, framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
}
})
})
func getNsCmdFlag(ns *v1.Namespace) string {
return fmt.Sprintf("--namespace=%v", ns.Name)
}

187
vendor/k8s.io/kubernetes/test/e2e/example_k8petstore.go generated vendored Normal file
View file

@ -0,0 +1,187 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"os"
"os/exec"
"path/filepath"
"strconv"
"syscall"
"time"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
k8bpsContainerVersion = "r.2.8.19" // Container version, see the examples/k8petstore dockerfiles for details.
k8bpsThroughputDummy = "0" // Polling time = 0, since we framework.Poll in ginkgo rather than using the shell script tests.
k8bpsRedisSlaves = "1" // Number of redis slaves.
k8bpsDontRunTest = "0" // Don't bother embedded test.
k8bpsStartupTimeout = 30 * time.Second // Amount of elapsed time before petstore transactions are being stored.
k8bpsMinTransactionsOnStartup = 3 // Amount of transactions we expect we should have before data generator starts.
// Constants for the first test. We can make this a hashmap once we add scale tests to it.
k8bpsSmokeTestFinalTransactions = 50
k8bpsSmokeTestTimeout = 60 * time.Second
)
// readTransactions reads the number of transactions from the k8petstore web server
// endpoint. For more details, see the source of the k8petstore web server.
func readTransactions(c clientset.Interface, ns string) (int, error) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil {
return -1, errProxy
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
body, err := proxyRequest.Namespace(ns).
Context(ctx).
Name("frontend").
Suffix("llen").
DoRaw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("Failed to read petstore transactions: %v", err)
}
return -1, err
}
totalTrans, err := strconv.Atoi(string(body))
return totalTrans, err
}
// runK8petstore runs the k8petstore application, bound to external nodeport, and
// polls until finalTransactionsExpected transactions are acquired, within at most maxTime.
func runK8petstore(restServers int, loadGenerators int, c clientset.Interface, ns string, finalTransactionsExpected int, maxTime time.Duration) {
var err error
k8bpsScriptLocation := filepath.Join(framework.TestContext.RepoRoot, "examples/k8petstore/k8petstore-nodeport.sh")
cmd := exec.Command(
k8bpsScriptLocation,
framework.TestContext.KubectlPath,
k8bpsContainerVersion,
k8bpsThroughputDummy,
strconv.Itoa(restServers),
strconv.Itoa(loadGenerators),
k8bpsRedisSlaves,
k8bpsDontRunTest, // Don't run the embedded test.
ns,
)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
framework.Logf("Starting k8petstore application....")
// Run the k8petstore app, and log / fail if it returns any errors.
// This should return quickly, assuming containers are downloaded.
if err = cmd.Start(); err != nil {
framework.Failf("%v", err)
}
// Make sure there are no command errors.
if err = cmd.Wait(); err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
framework.Logf("Exit Status: %d", status.ExitStatus())
}
}
}
Expect(err).NotTo(HaveOccurred())
framework.Logf("... Done starting k8petstore ")
totalTransactions := 0
framework.Logf("Start polling, timeout is %v seconds", maxTime)
// How long until the FIRST transactions are created.
startupTimeout := time.After(k8bpsStartupTimeout)
// Maximum time to wait until we reach the nth transaction.
transactionsCompleteTimeout := time.After(maxTime)
tick := time.Tick(2 * time.Second)
var ready = false
framework.Logf("Now waiting %v seconds to see progress (transactions > 3)", k8bpsStartupTimeout)
T:
for {
select {
case <-transactionsCompleteTimeout:
framework.Logf("Completion timeout %v reached, %v transactions not complete. Breaking!", time.Duration(maxTime), finalTransactionsExpected)
break T
case <-tick:
// Don't fail if there's an error. We expect a few failures might happen in the cloud.
totalTransactions, err = readTransactions(c, ns)
if err == nil {
framework.Logf("PetStore : Time: %v, %v = total petstore transactions stored into redis.", time.Now(), totalTransactions)
if totalTransactions >= k8bpsMinTransactionsOnStartup {
ready = true
}
if totalTransactions >= finalTransactionsExpected {
break T
}
} else {
if ready {
framework.Logf("Blip: during polling: %v", err)
} else {
framework.Logf("Not ready yet: %v", err)
}
}
case <-startupTimeout:
if !ready {
framework.Logf("Startup Timeout %v reached: Its been too long and we still haven't started accumulating %v transactions!", startupTimeout, k8bpsMinTransactionsOnStartup)
break T
}
}
}
// We should have reached at least finalTransactionsExpected transactions by now.
// If this fails but transactions are still being created, we may need to recalibrate
// the finalTransactionsExpected value - or else your cluster is broken/slow!
Expect(totalTransactions).To(BeNumerically(">=", finalTransactionsExpected))
}
var _ = framework.KubeDescribe("Pet Store [Feature:Example]", func() {
BeforeEach(func() {
// The shell scripts in k8petstore break on jenkins... Pure golang rewrite is in progress.
framework.SkipUnlessProviderIs("local")
})
// The number of nodes dictates total number of generators/transaction expectations.
var nodeCount int
f := framework.NewDefaultFramework("petstore")
It(fmt.Sprintf("should scale to persist a nominal number ( %v ) of transactions in %v seconds", k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout), func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
loadGenerators := nodeCount
restServers := nodeCount
fmt.Printf("load generators / rest servers [ %v / %v ] ", loadGenerators, restServers)
runK8petstore(restServers, loadGenerators, f.ClientSet, f.Namespace.Name, k8bpsSmokeTestFinalTransactions, k8bpsSmokeTestTimeout)
})
})

627
vendor/k8s.io/kubernetes/test/e2e/examples.go generated vendored Normal file
View file

@ -0,0 +1,627 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"regexp"
"strings"
"sync"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/generated"
testutils "k8s.io/kubernetes/test/utils"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
serverStartTimeout = framework.PodStartTimeout + 3*time.Minute
)
var _ = framework.KubeDescribe("[Feature:Example]", func() {
f := framework.NewDefaultFramework("examples")
// Reusable cluster state function. This won't be adversely affected by lazy initialization of framework.
clusterState := func(selectorKey string, selectorValue string) *framework.ClusterVerification {
return f.NewClusterVerification(
f.Namespace,
framework.PodStateVerification{
Selectors: map[string]string{selectorKey: selectorValue},
ValidPhases: []v1.PodPhase{v1.PodRunning},
})
}
// Customized ForEach wrapper for this test.
forEachPod := func(selectorKey string, selectorValue string, fn func(v1.Pod)) {
clusterState(selectorKey, selectorValue).ForEach(fn)
}
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
// This test wants powerful permissions. Since the namespace names are unique, we
// can leave this lying around so we don't have to race any caches.
framework.BindClusterRoleInNamespace(c.Rbac(), "edit", f.Namespace.Name,
rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
err := framework.WaitForAuthorizationUpdate(c.Authorization(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
f.Namespace.Name, "create", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
})
framework.KubeDescribe("Redis", func() {
It("should create and stop redis servers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/redis", file)
}
bootstrapYaml := mkpath("redis-master.yaml")
sentinelServiceYaml := mkpath("redis-sentinel-service.yaml")
sentinelControllerYaml := mkpath("redis-sentinel-controller.yaml")
controllerYaml := mkpath("redis-controller.yaml")
bootstrapPodName := "redis-master"
redisRC := "redis"
sentinelRC := "redis-sentinel"
nsFlag := fmt.Sprintf("--namespace=%v", ns)
expectedOnServer := "The server is now ready to accept connections"
expectedOnSentinel := "+monitor master"
By("starting redis bootstrap")
framework.RunKubectlOrDie("create", "-f", bootstrapYaml, nsFlag)
err := framework.WaitForPodNameRunningInNamespace(c, bootstrapPodName, ns)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "master", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, bootstrapPodName, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("setting up services and controllers")
framework.RunKubectlOrDie("create", "-f", sentinelServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", sentinelControllerYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{sentinelRC: "true"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
label = labels.SelectorFromSet(labels.Set(map[string]string{"name": redisRC}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
By("scaling up the deployment")
framework.RunKubectlOrDie("scale", "rc", redisRC, "--replicas=3", nsFlag)
framework.RunKubectlOrDie("scale", "rc", sentinelRC, "--replicas=3", nsFlag)
framework.WaitForRCToStabilize(c, ns, redisRC, framework.PodReadyBeforeTimeout)
framework.WaitForRCToStabilize(c, ns, sentinelRC, framework.PodReadyBeforeTimeout)
By("checking up the services")
checkAllLogs := func() {
selectorKey, selectorValue := "name", redisRC
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
}
})
selectorKey, selectorValue = sentinelRC, "true"
label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
}
})
}
checkAllLogs()
By("turning down bootstrap")
framework.RunKubectlOrDie("delete", "-f", bootstrapYaml, nsFlag)
err = framework.WaitForRCPodToDisappear(c, ns, redisRC, bootstrapPodName)
Expect(err).NotTo(HaveOccurred())
By("waiting for the new master election")
checkAllLogs()
})
})
framework.KubeDescribe("Spark", func() {
It("should start spark master, driver and workers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/spark", file)
}
// TODO: Add Zeppelin and Web UI to this example.
serviceYaml := mkpath("spark-master-service.yaml")
masterYaml := mkpath("spark-master-controller.yaml")
workerControllerYaml := mkpath("spark-worker-controller.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
master := func() {
By("starting master")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", masterYaml, nsFlag)
selectorKey, selectorValue := "component", "spark-master"
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Now polling for Master startup...")
// There is only one master pod, but iterating is a natural way to look up pod names.
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
framework.Logf("Now waiting for master to startup in %v", pod.Name)
_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
By("waiting for master endpoint")
err = framework.WaitForEndpoint(c, ns, "spark-master")
Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
if maErr != nil {
framework.Failf("Didn't find target string. error:", maErr)
}
})
}
worker := func() {
By("starting workers")
framework.Logf("Now starting Workers")
framework.RunKubectlOrDie("create", "-f", workerControllerYaml, nsFlag)
selectorKey, selectorValue := "component", "spark-worker"
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
// For now, scaling is orthogonal to the core test.
// framework.ScaleRC(c, ns, "spark-worker-controller", 2, true)
framework.Logf("Now polling for worker startup...")
forEachPod(selectorKey, selectorValue,
func(pod v1.Pod) {
_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
Expect(slaveErr).NotTo(HaveOccurred())
})
}
// Run the worker verification after we turn up the master.
defer worker()
master()
})
})
framework.KubeDescribe("Cassandra", func() {
It("should create and scale cassandra", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
}
serviceYaml := mkpath("cassandra-service.yaml")
controllerYaml := mkpath("cassandra-controller.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("Starting the cassandra service")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.Logf("wait for service")
err := framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
Expect(err).NotTo(HaveOccurred())
// Create an RC with n nodes in it. Each node will then be verified.
By("Creating a Cassandra RC")
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod("app", "cassandra", func(pod v1.Pod) {
framework.Logf("Verifying pod %v ", pod.Name)
// TODO how do we do this better? Ready Probe?
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if !matched {
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
}
})
})
})
framework.KubeDescribe("CassandraStatefulSet", func() {
It("should create statefulset", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/cassandra", file)
}
serviceYaml := mkpath("cassandra-service.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
// The DNS name has to be rewritten because the test runs in a dynamically named namespace.
input, err := ioutil.ReadFile(mkpath("cassandra-statefulset.yaml"))
Expect(err).NotTo(HaveOccurred())
output := strings.Replace(string(input), "cassandra-0.cassandra.default.svc.cluster.local", "cassandra-0.cassandra."+ns+".svc.cluster.local", -1)
statefulsetYaml := "/tmp/cassandra-statefulset.yaml"
err = ioutil.WriteFile(statefulsetYaml, []byte(output), 0644)
Expect(err).NotTo(HaveOccurred())
By("Starting the cassandra service")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.Logf("wait for service")
err = framework.WaitForService(c, ns, "cassandra", true, framework.Poll, framework.ServiceRespondingTimeout)
Expect(err).NotTo(HaveOccurred())
// Create a StatefulSet with n nodes. Each node will then be verified.
By("Creating a Cassandra StatefulSet")
framework.RunKubectlOrDie("create", "-f", statefulsetYaml, nsFlag)
statefulsetPoll := 30 * time.Second
statefulsetTimeout := 10 * time.Minute
// TODO - parse this number out of the yaml
numPets := 3
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
func() (bool, error) {
podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()})
if err != nil {
return false, fmt.Errorf("unable to get list of pods in statefulset %s", label)
}
if len(podList.Items) < numPets {
framework.Logf("Found %d pets, waiting for %d", len(podList.Items), numPets)
return false, nil
}
if len(podList.Items) > numPets {
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
}
for _, p := range podList.Items {
isReady := v1.IsPodReady(&p)
if p.Status.Phase != v1.PodRunning || !isReady {
framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady)
return false, nil
}
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if !matched {
framework.Failf("Cassandra pod ip %s is not reporting Up and Normal 'UN' via nodetool status", pod.Status.PodIP)
}
})
// Reuse deleteAllStatefulSets from the statefulset e2e, as deleting the PVCs by hand is a pain.
deleteAllStatefulSets(c, ns)
})
})
framework.KubeDescribe("Storm", func() {
It("should create and stop Zookeeper, Nimbus and Storm worker servers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storm", file)
}
zookeeperServiceJson := mkpath("zookeeper-service.json")
zookeeperPodJson := mkpath("zookeeper.json")
nimbusServiceJson := mkpath("storm-nimbus-service.json")
nimbusPodJson := mkpath("storm-nimbus.json")
workerControllerJson := mkpath("storm-worker-controller.json")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
zookeeperPod := "zookeeper"
nimbusPod := "nimbus"
By("starting Zookeeper")
framework.RunKubectlOrDie("create", "-f", zookeeperPodJson, nsFlag)
framework.RunKubectlOrDie("create", "-f", zookeeperServiceJson, nsFlag)
err := f.WaitForPodRunningSlow(zookeeperPod)
Expect(err).NotTo(HaveOccurred())
By("checking if zookeeper is up and running")
_, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "binding to port", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForEndpoint(c, ns, "zookeeper")
Expect(err).NotTo(HaveOccurred())
By("starting Nimbus")
framework.RunKubectlOrDie("create", "-f", nimbusPodJson, nsFlag)
framework.RunKubectlOrDie("create", "-f", nimbusServiceJson, nsFlag)
err = f.WaitForPodRunningSlow(nimbusPod)
Expect(err).NotTo(HaveOccurred())
err = framework.WaitForEndpoint(c, ns, "nimbus")
Expect(err).NotTo(HaveOccurred())
By("starting workers")
framework.RunKubectlOrDie("create", "-f", workerControllerJson, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "storm-worker"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod("name", "storm-worker", func(pod v1.Pod) {
// Do nothing; just wait for the pod to be running.
})
// TODO: Add logging configuration to nimbus & workers images and then
// look for a string instead of sleeping.
time.Sleep(20 * time.Second)
By("checking if there are established connections to Zookeeper")
_, err = framework.LookForStringInLog(ns, zookeeperPod, "zookeeper", "Established session", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking if Nimbus responds to requests")
framework.LookForString("No topologies running.", time.Minute, func() string {
return framework.RunKubectlOrDie("exec", "nimbus", nsFlag, "--", "bin/storm", "list")
})
})
})
framework.KubeDescribe("Liveness", func() {
It("liveness pods should be automatically restarted", func() {
mkpath := func(file string) string {
path := filepath.Join("test/fixtures/doc-yaml/user-guide/liveness", file)
framework.ExpectNoError(createFileForGoBinData(path, path))
return path
}
execYaml := mkpath("exec-liveness.yaml")
httpYaml := mkpath("http-liveness.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, execYaml), nsFlag)
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, httpYaml), nsFlag)
// Since both containers start rapidly, we can easily run this test in parallel.
var wg sync.WaitGroup
passed := true
checkRestart := func(podName string, timeout time.Duration) {
err := framework.WaitForPodNameRunningInNamespace(c, podName, ns)
Expect(err).NotTo(HaveOccurred())
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
pod, err := c.Core().Pods(ns).Get(podName, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
stat := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
if stat.RestartCount > 0 {
framework.Logf("Saw %v restart, succeeded...", podName)
wg.Done()
return
}
}
framework.Logf("Failed waiting for %v restart! ", podName)
passed = false
wg.Done()
}
By("Check restarts")
// Start the "actual test", and wait for both pods to complete.
// If 2 fail: Something is broken with the test (or maybe even with liveness).
// If 1 fails: It's probably just an error in the examples/ files themselves.
wg.Add(2)
for _, c := range []string{"liveness-http", "liveness-exec"} {
go checkRestart(c, 2*time.Minute)
}
wg.Wait()
if !passed {
framework.Failf("At least one liveness example failed. See the logs above.")
}
})
})
framework.KubeDescribe("Secret", func() {
It("should create a pod that reads a secret", func() {
mkpath := func(file string) string {
path := filepath.Join("test/fixtures/doc-yaml/user-guide/secrets", file)
framework.ExpectNoError(createFileForGoBinData(path, path))
return path
}
secretYaml := mkpath("secret.yaml")
podYaml := mkpath("secret-pod.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
podName := "secret-test-pod"
By("creating secret and pod")
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, secretYaml), nsFlag)
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, podYaml), nsFlag)
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns, "")
Expect(err).NotTo(HaveOccurred())
By("checking if secret was read correctly")
_, err = framework.LookForStringInLog(ns, "secret-test-pod", "test-container", "value-1", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
})
framework.KubeDescribe("Downward API", func() {
It("should create a pod that prints his name and namespace", func() {
mkpath := func(file string) string {
path := filepath.Join("test/fixtures/doc-yaml/user-guide/downward-api", file)
framework.ExpectNoError(createFileForGoBinData(path, path))
return path
}
podYaml := mkpath("dapi-pod.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
podName := "dapi-test-pod"
By("creating the pod")
framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, podYaml), nsFlag)
err := framework.WaitForPodNoLongerRunningInNamespace(c, podName, ns, "")
Expect(err).NotTo(HaveOccurred())
By("checking if name and namespace were passed correctly")
_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAMESPACE=%v", ns), serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, podName, "test-container", fmt.Sprintf("MY_POD_NAME=%v", podName), serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
})
framework.KubeDescribe("RethinkDB", func() {
It("should create and stop rethinkdb servers", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/rethinkdb", file)
}
driverServiceYaml := mkpath("driver-service.yaml")
rethinkDbControllerYaml := mkpath("rc.yaml")
adminPodYaml := mkpath("admin-pod.yaml")
adminServiceYaml := mkpath("admin-service.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting rethinkdb")
framework.RunKubectlOrDie("create", "-f", driverServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", rethinkDbControllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"db": "rethinkdb"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
checkDbInstances := func() {
forEachPod("db", "rethinkdb", func(pod v1.Pod) {
_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
}
checkDbInstances()
err = framework.WaitForEndpoint(c, ns, "rethinkdb-driver")
Expect(err).NotTo(HaveOccurred())
By("scaling rethinkdb")
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true)
checkDbInstances()
By("starting admin")
framework.RunKubectlOrDie("create", "-f", adminServiceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", adminPodYaml, nsFlag)
err = framework.WaitForPodNameRunningInNamespace(c, "rethinkdb-admin", ns)
Expect(err).NotTo(HaveOccurred())
checkDbInstances()
content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
Expect(err).NotTo(HaveOccurred())
if !strings.Contains(content, "<title>RethinkDB Administration Console</title>") {
framework.Failf("RethinkDB console is not running")
}
})
})
framework.KubeDescribe("Hazelcast", func() {
It("should create and scale hazelcast", func() {
mkpath := func(file string) string {
return filepath.Join(framework.TestContext.RepoRoot, "examples/storage/hazelcast", file)
}
serviceYaml := mkpath("hazelcast-service.yaml")
controllerYaml := mkpath("hazelcast-controller.yaml")
nsFlag := fmt.Sprintf("--namespace=%v", ns)
By("starting hazelcast")
framework.RunKubectlOrDie("create", "-f", serviceYaml, nsFlag)
framework.RunKubectlOrDie("create", "-f", controllerYaml, nsFlag)
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
err = framework.WaitForEndpoint(c, ns, "hazelcast")
Expect(err).NotTo(HaveOccurred())
By("scaling hazelcast")
framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true)
forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
})
})
})
func makeHttpRequestToService(c clientset.Interface, ns, service, path string, timeout time.Duration) (string, error) {
var result []byte
var err error
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(c, c.Core().RESTClient().Get())
if errProxy != nil {
break
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
result, err = proxyRequest.Namespace(ns).
Context(ctx).
Name(service).
Suffix(path).
Do().
Raw()
if err != nil {
break
}
}
return string(result), err
}
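// Illustrative usage, mirroring the RethinkDB test above: poll the service
// proxy until the admin console answers or the timeout elapses.
//
// content, err := makeHttpRequestToService(c, ns, "rethinkdb-admin", "/", framework.EndpointRegisterTimeout)
// if err == nil {
// framework.Logf("got %d bytes of console HTML", len(content))
// }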
// Pass enough context in the 'old' parameter so that it replaces what you really intended.
func prepareResourceWithReplacedString(inputFile, old, new string) string {
f, err := os.Open(inputFile)
Expect(err).NotTo(HaveOccurred())
defer f.Close()
data, err := ioutil.ReadAll(f)
Expect(err).NotTo(HaveOccurred())
podYaml := strings.Replace(string(data), old, new, 1)
return podYaml
}
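// A minimal usage sketch; the fixture name and replaced string are
// hypothetical, and RunKubectlOrDieInput is the framework helper that feeds
// YAML to kubectl on stdin:
//
// yaml := prepareResourceWithReplacedString(mkpath("pod.yaml"), "namespace: default", "namespace: "+ns)
// framework.RunKubectlOrDieInput(yaml, "create", "-f", "-", nsFlag)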
func createFileForGoBinData(gobindataPath, outputFilename string) error {
data := generated.ReadOrDie(gobindataPath)
if len(data) == 0 {
return fmt.Errorf("Failed to read gobindata from %v", gobindataPath)
}
fullPath := filepath.Join(framework.TestContext.OutputDir, outputFilename)
err := os.MkdirAll(filepath.Dir(fullPath), 0777)
if err != nil {
return fmt.Errorf("Error while creating directory %v: %v", filepath.Dir(fullPath), err)
}
err = ioutil.WriteFile(fullPath, data, 0644)
if err != nil {
return fmt.Errorf("Error while trying to write to file %v: %v", fullPath, err)
}
return nil
}
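// Typical call site, as in the Liveness and Secret tests above: the gobindata
// path doubles as the output path, and the materialized file is then fed to
// kubectl from the output dir.
//
// path := "test/fixtures/doc-yaml/user-guide/liveness/exec-liveness.yaml"
// framework.ExpectNoError(createFileForGoBinData(path, path))
// framework.RunKubectlOrDie("create", "-f", filepath.Join(framework.TestContext.OutputDir, path), nsFlag)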

486
vendor/k8s.io/kubernetes/test/e2e/federated-ingress.go generated vendored Normal file
View file

@ -0,0 +1,486 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"net/http"
"os"
"reflect"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
MaxRetriesOnFederatedApiserver = 3
FederatedIngressTimeout = 120 * time.Second
FederatedIngressName = "federated-ingress"
FederatedIngressServiceName = "federated-ingress-service"
FederatedIngressServicePodName = "federated-ingress-service-test-pod"
)
var _ = framework.KubeDescribe("Federated ingresses [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federated-ingress")
// Create/delete ingress api objects
// Validate federation apiserver, does not rely on underlying clusters or federation ingress controller.
Describe("Federated Ingresses", func() {
AfterEach(func() {
nsName := f.FederationNamespace.Name
// Delete all ingresses.
deleteAllIngressesOrFail(f.FederationClientset_1_5, nsName)
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
nsName := f.FederationNamespace.Name
ingress := createIngressOrFail(f.FederationClientset_1_5, nsName)
By(fmt.Sprintf("Creation of ingress %q in namespace %q succeeded. Deleting ingress.", ingress.Name, nsName))
// Cleanup
err := f.FederationClientset_1_5.Extensions().Ingresses(nsName).Delete(ingress.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting ingress %q in namespace %q", ingress.Name, ingress.Namespace)
By(fmt.Sprintf("Deletion of ingress %q in namespace %q succeeded.", ingress.Name, nsName))
})
})
// e2e cases for federation ingress controller
var _ = Describe("Federated Ingresses", func() {
var (
clusters map[string]*cluster // All clusters, keyed by cluster name
primaryClusterName, federationName, ns string
jig *federationTestJig
)
// register clusters in federation apiserver
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
framework.SkipUnlessProviderIs("gce", "gke") // TODO: Federated ingress is not yet supported on non-GCP platforms.
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
federationName = DefaultFederationName
}
jig = newFederationTestJig(f.FederationClientset_1_5)
clusters = map[string]*cluster{}
primaryClusterName = registerClusters(clusters, UserAgentName, federationName, f)
ns = f.FederationNamespace.Name
})
AfterEach(func() {
// Delete all ingresses.
nsName := f.FederationNamespace.Name
deleteAllIngressesOrFail(f.FederationClientset_1_5, nsName)
unregisterClusters(clusters, f)
})
It("should create and update matching ingresses in underlying clusters", func() {
ingress := createIngressOrFail(f.FederationClientset_1_5, ns)
// wait for ingress shards being created
waitForIngressShardsOrFail(ns, ingress, clusters)
ingress = updateIngressOrFail(f.FederationClientset_1_5, ns)
waitForIngressShardsUpdatedOrFail(ns, ingress, clusters)
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := false
verifyCascadingDeletionForIngress(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that ingresses were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := true
verifyCascadingDeletionForIngress(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that ingresses were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
verifyCascadingDeletionForIngress(f.FederationClientset_1_5, clusters, nil, nsName)
By(fmt.Sprintf("Verified that ingresses were not deleted from underlying clusters"))
})
var _ = Describe("Ingress connectivity and DNS", func() {
var (
service *v1.Service
)
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// create backend pod
createBackendPodsOrFail(clusters, ns, FederatedIngressServicePodName)
// create backend service
service = createServiceOrFail(f.FederationClientset_1_5, ns, FederatedIngressServiceName)
// create ingress object
jig.ing = createIngressOrFail(f.FederationClientset_1_5, ns)
// wait for services objects sync
waitForServiceShardsOrFail(ns, service, clusters)
// wait for ingress objects sync
waitForIngressShardsOrFail(ns, jig.ing, clusters)
})
AfterEach(func() {
deleteBackendPodsOrFail(clusters, ns)
if service != nil {
deleteServiceOrFail(f.FederationClientset_1_5, ns, service.Name, nil)
cleanupServiceShardsAndProviderResources(ns, service, clusters)
service = nil
} else {
By("No service to delete. Service is nil")
}
if jig.ing != nil {
deleteIngressOrFail(f.FederationClientset_1_5, ns, jig.ing.Name, nil)
for clusterName, cluster := range clusters {
deleteClusterIngressOrFail(clusterName, cluster.Clientset, ns, jig.ing.Name)
}
jig.ing = nil
} else {
By("No ingress to delete. Ingress is nil")
}
})
PIt("should be able to discover a federated ingress service via DNS", func() {
// We only care about the ingress service name here.
svcDNSNames := []string{
fmt.Sprintf("%s.%s", FederatedIngressServiceName, ns),
fmt.Sprintf("%s.%s.svc.cluster.local.", FederatedIngressServiceName, ns),
// TODO these two entries are not set yet
//fmt.Sprintf("%s.%s.%s", FederatedIngressServiceName, ns, federationName),
//fmt.Sprintf("%s.%s.%s.svc.cluster.local.", FederatedIngressServiceName, ns, federationName),
}
// check dns records in underlying cluster
for i, DNSName := range svcDNSNames {
discoverService(f, DNSName, true, "federated-ingress-e2e-discovery-pod-"+strconv.Itoa(i))
}
// TODO check dns record in global dns server
})
It("should be able to connect to a federated ingress via its load balancer", func() {
// check the traffic on federation ingress
jig.waitForFederatedIngress()
})
})
})
})
// Deletes all Ingresses in the given namespace name.
func deleteAllIngressesOrFail(clientset *fedclientset.Clientset, nsName string) {
orphanDependents := false
err := clientset.Extensions().Ingresses(nsName).DeleteCollection(&v1.DeleteOptions{OrphanDependents: &orphanDependents}, v1.ListOptions{})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Error in deleting ingresses in namespace: %s", nsName))
}
/*
equivalentIngress returns true if the two ingress specs are equivalent.
*/
func equivalentIngress(federatedIngress, clusterIngress v1beta1.Ingress) bool {
return reflect.DeepEqual(clusterIngress.Spec, federatedIngress.Spec)
}
// verifyCascadingDeletionForIngress verifies that ingresses are deleted from
// underlying clusters when orphan dependents is false and they are not deleted
// when orphan dependents is true.
func verifyCascadingDeletionForIngress(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
ingress := createIngressOrFail(clientset, nsName)
ingressName := ingress.Name
// Check subclusters if the ingress was created there.
By(fmt.Sprintf("Waiting for ingress %s to be created in all underlying clusters", ingressName))
waitForIngressShardsOrFail(nsName, ingress, clusters)
By(fmt.Sprintf("Deleting ingress %s", ingressName))
deleteIngressOrFail(clientset, nsName, ingressName, orphanDependents)
By(fmt.Sprintf("Verifying ingresses %s in underlying clusters", ingressName))
errMessages := []string{}
// ingress should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Extensions().Ingresses(nsName).Get(ingressName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for ingress %s in cluster %s, expected ingress to exist", ingressName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for ingress %s in cluster %s, got error: %v", ingressName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
/*
waitForIngressOrFail waits until an ingress is either present or absent in the cluster specified by clientset.
If the condition is not met within timeout, it fails the calling test.
*/
func waitForIngressOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, present bool, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
var clusterIngress *v1beta1.Ingress
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
// Assign to the outer clusterIngress; a := here would shadow it and skip the equivalence check below.
var err error
clusterIngress, err = clientset.Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is absent", ingress.Name, namespace))
return true, nil // Success
}
if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is present", ingress.Name, namespace))
return true, nil // Success
}
By(fmt.Sprintf("Ingress %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", ingress.Name, namespace, clusterIngress != nil && err == nil, present, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster: Present=%v", ingress.Name, namespace, present)
if present && clusterIngress != nil {
Expect(equivalentIngress(*clusterIngress, *ingress))
}
}
/*
waitForIngressShardsOrFail waits for the ingress to appear in all clusters
*/
func waitForIngressShardsOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, true, FederatedIngressTimeout)
}
}
/*
waitForIngressShardsUpdatedOrFail waits for the ingress to be updated in all clusters
*/
func waitForIngressShardsUpdatedOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressUpdateOrFail(c.Clientset, namespace, ingress, FederatedIngressTimeout)
}
}
/*
waitForIngressUpdateOrFail waits until an ingress in the specified cluster is updated with the same spec as the federated ingress.
If the condition is not met within timeout, it fails the calling test.
*/
func waitForIngressUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, ingress *v1beta1.Ingress, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated ingress shard of ingress %q in namespace %q from cluster", ingress.Name, namespace))
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
clusterIngress, err := clientset.Ingresses(namespace).Get(ingress.Name, metav1.GetOptions{})
if err == nil { // We want it present, and the Get succeeded, so we're all good.
if equivalentIngress(*clusterIngress, *ingress) {
By(fmt.Sprintf("Success: shard of federated ingress %q in namespace %q in cluster is updated", ingress.Name, namespace))
return true, nil
}
By(fmt.Sprintf("Ingress %q in namespace %q in cluster, waiting for service being updated, trying again in %s (err=%v)", ingress.Name, namespace, framework.Poll, err))
return false, nil
}
By(fmt.Sprintf("Ingress %q in namespace %q in cluster, waiting for service being updated, trying again in %s (err=%v)", ingress.Name, namespace, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify ingress %q in namespace %q in cluster", ingress.Name, namespace)
}
/*
waitForIngressShardsGoneOrFail waits for the ingress to disappear in all clusters
*/
func waitForIngressShardsGoneOrFail(namespace string, ingress *v1beta1.Ingress, clusters map[string]*cluster) {
framework.Logf("Waiting for ingress %q in %d clusters", ingress.Name, len(clusters))
for _, c := range clusters {
waitForIngressOrFail(c.Clientset, namespace, ingress, false, FederatedIngressTimeout)
}
}
func deleteIngressOrFail(clientset *fedclientset.Clientset, namespace string, ingressName string, orphanDependents *bool) {
if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteIngressOrFail: clientset: %v, namespace: %v, ingress: %v", clientset, namespace, ingressName))
}
err := clientset.Ingresses(namespace).Delete(ingressName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting ingress %q from namespace %q", ingressName, namespace)
// Wait for the ingress to be deleted.
err = wait.Poll(framework.Poll, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().Ingresses(namespace).Get(ingressName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
framework.Failf("Error in deleting ingress %s: %v", ingressName, err)
}
}
// TODO: quinton: This is largely a cut 'n paste of the above. Yuck! Refactor as soon as we have a common interface implemented by both fedclientset.Clientset and kubeclientset.Clientset (see the sketch below).
func deleteClusterIngressOrFail(clusterName string, clientset *kubeclientset.Clientset, namespace string, ingressName string) {
if clientset == nil || len(namespace) == 0 || len(ingressName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteClusterIngressOrFail: cluster: %q, clientset: %v, namespace: %v, ingress: %v", clusterName, clientset, namespace, ingressName))
}
err := clientset.Ingresses(namespace).Delete(ingressName, v1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Error deleting cluster ingress %q/%q from cluster %q", namespace, ingressName, clusterName)
}
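// One possible shape for the common interface mentioned in the TODO above; a
// sketch only, implemented by neither clientset today:
//
// type ingressDeleter interface {
// Delete(name string, options *v1.DeleteOptions) error
// }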
func createIngressOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.Ingress {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Creating federated ingress %q in namespace %q", FederatedIngressName, namespace))
ingress := &v1beta1.Ingress{
ObjectMeta: v1.ObjectMeta{
Name: FederatedIngressName,
},
Spec: v1beta1.IngressSpec{
Backend: &v1beta1.IngressBackend{
ServiceName: "testingress-service",
ServicePort: intstr.FromInt(80),
},
},
}
newIng, err := clientset.Extensions().Ingresses(namespace).Create(ingress)
framework.ExpectNoError(err, "Creating ingress %q in namespace %q", ingress.Name, namespace)
By(fmt.Sprintf("Successfully created federated ingress %q in namespace %q", FederatedIngressName, namespace))
return newIng
}
func updateIngressOrFail(clientset *fedclientset.Clientset, namespace string) (newIng *v1beta1.Ingress) {
var err error
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createIngressOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
ingress := &v1beta1.Ingress{
ObjectMeta: v1.ObjectMeta{
Name: FederatedIngressName,
},
Spec: v1beta1.IngressSpec{
Backend: &v1beta1.IngressBackend{
ServiceName: "updated-testingress-service",
ServicePort: intstr.FromInt(80),
},
},
}
err = waitForFederatedIngressExists(clientset, namespace, FederatedIngressName, FederatedIngressTimeout)
if err != nil {
framework.Failf("failed to get ingress %q: %v", FederatedIngressName, err)
}
for i := 0; i < MaxRetriesOnFederatedApiserver; i++ {
newIng, err = clientset.Extensions().Ingresses(namespace).Update(ingress)
if err == nil {
describeIng(namespace)
return newIng
}
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
framework.Failf("failed to update ingress %q: %v", FederatedIngressName, err)
}
}
framework.Failf("too many retries updating ingress %q", FederatedIngressName)
return nil
}
func (j *federationTestJig) waitForFederatedIngress() {
// Wait for the loadbalancer IP.
address, err := waitForFederatedIngressAddress(j.client, j.ing.Namespace, j.ing.Name, framework.LoadBalancerPollTimeout)
if err != nil {
framework.Failf("Ingress failed to acquire an IP address within %v: %v", framework.LoadBalancerPollTimeout, err)
}
j.address = address
framework.Logf("Found address %v for ingress %v", j.address, j.ing.Name)
timeoutClient := &http.Client{Timeout: reqTimeout}
// Check that all rules respond to a simple GET.
for _, rules := range j.ing.Spec.Rules {
proto := "http"
for _, p := range rules.IngressRuleValue.HTTP.Paths {
route := fmt.Sprintf("%v://%v%v", proto, address, p.Path)
framework.Logf("Testing route %v host %v with simple GET", route, rules.Host)
framework.ExpectNoError(pollURL(route, rules.Host, framework.LoadBalancerPollTimeout, framework.LoadBalancerPollInterval, timeoutClient, false))
}
}
}
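// Reachability note: waitForFederatedIngress treats the ingress as healthy
// once its load balancer address answers a plain GET for every rule path,
// retrying each route until framework.LoadBalancerPollTimeout expires.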
type federationTestJig struct {
// TODO add TLS check later
rootCAs map[string][]byte
address string
ing *v1beta1.Ingress
client *fedclientset.Clientset
}
func newFederationTestJig(c *fedclientset.Clientset) *federationTestJig {
return &federationTestJig{client: c, rootCAs: map[string][]byte{}}
}
// waitForFederatedIngressAddress waits for the Ingress to acquire an address.
func waitForFederatedIngressAddress(c *fedclientset.Clientset, ns, ingName string, timeout time.Duration) (string, error) {
var address string
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
ipOrNameList, err := getFederatedIngressAddress(c, ns, ingName)
if err != nil || len(ipOrNameList) == 0 {
framework.Logf("Waiting for Ingress %v to acquire IP, error %v", ingName, err)
return false, nil
}
address = ipOrNameList[0]
return true, nil
})
return address, err
}
// waitForFederatedIngressExists waits until the Ingress object exists.
func waitForFederatedIngressExists(c *fedclientset.Clientset, ns, ingName string, timeout time.Duration) error {
err := wait.PollImmediate(10*time.Second, timeout, func() (bool, error) {
_, err := c.Extensions().Ingresses(ns).Get(ingName, metav1.GetOptions{})
if err != nil {
framework.Logf("Waiting for Ingress %v, error %v", ingName, err)
return false, nil
}
return true, nil
})
return err
}
// getFederatedIngressAddress returns the ips/hostnames associated with the Ingress.
func getFederatedIngressAddress(client *fedclientset.Clientset, ns, name string) ([]string, error) {
ing, err := client.Extensions().Ingresses(ns).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
addresses := []string{}
for _, a := range ing.Status.LoadBalancer.Ingress {
if a.IP != "" {
addresses = append(addresses, a.IP)
}
if a.Hostname != "" {
addresses = append(addresses, a.Hostname)
}
}
return addresses, nil
}

233
vendor/k8s.io/kubernetes/test/e2e/federated-namespace.go generated vendored Normal file
View file

@ -0,0 +1,233 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/api/v1"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const (
namespacePrefix = "e2e-namespace-test-"
eventNamePrefix = "e2e-namespace-test-event-"
)
// Create/delete namespace api objects
var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federation-namespace")
Describe("Namespace objects", func() {
var federationName string
var clusters map[string]*cluster // All clusters, keyed by cluster name
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// TODO: Federation API server should be able to answer this.
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
federationName = DefaultFederationName
}
clusters = map[string]*cluster{}
registerClusters(clusters, UserAgentName, federationName, f)
})
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
deleteAllTestNamespaces(nil,
f.FederationClientset_1_5.Core().Namespaces().List,
f.FederationClientset_1_5.Core().Namespaces().Delete)
for _, cluster := range clusters {
deleteAllTestNamespaces(nil,
cluster.Core().Namespaces().List,
cluster.Core().Namespaces().Delete)
}
unregisterClusters(clusters, f)
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := createNamespace(f.FederationClientset_1_5.Core().Namespaces())
By(fmt.Sprintf("Deleting namespace %s", nsName))
deleteAllTestNamespaces(nil,
f.FederationClientset_1_5.Core().Namespaces().List,
f.FederationClientset_1_5.Core().Namespaces().Delete)
By(fmt.Sprintf("Verified that deletion succeeded"))
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
orphanDependents := false
verifyNsCascadingDeletion(f.FederationClientset_1_5.Core().Namespaces(), clusters, &orphanDependents)
By(fmt.Sprintf("Verified that namespaces were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
orphanDependents := true
verifyNsCascadingDeletion(f.FederationClientset_1_5.Core().Namespaces(), clusters, &orphanDependents)
By(fmt.Sprintf("Verified that namespaces were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
verifyNsCascadingDeletion(f.FederationClientset_1_5.Core().Namespaces(), clusters, nil)
By(fmt.Sprintf("Verified that namespaces were not deleted from underlying clusters"))
})
It("all resources in the namespace should be deleted when namespace is deleted", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := createNamespace(f.FederationClientset_1_5.Core().Namespaces())
// Create resources in the namespace.
event := api_v1.Event{
ObjectMeta: api_v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName(eventNamePrefix),
Namespace: nsName,
},
InvolvedObject: api_v1.ObjectReference{
Kind: "Pod",
Namespace: nsName,
Name: "sample-pod",
},
}
By(fmt.Sprintf("Creating event %s in namespace %s", event.Name, nsName))
_, err := f.FederationClientset_1_5.Core().Events(nsName).Create(&event)
if err != nil {
framework.Failf("Failed to create event %v in namespace %s, err: %s", event, nsName, err)
}
By(fmt.Sprintf("Deleting namespace %s", nsName))
deleteAllTestNamespaces(nil,
f.FederationClientset_1_5.Core().Namespaces().List,
f.FederationClientset_1_5.Core().Namespaces().Delete)
By(fmt.Sprintf("Verify that event %s was deleted as well", event.Name))
latestEvent, err := f.FederationClientset_1_5.Core().Events(nsName).Get(event.Name, metav1.GetOptions{})
if !errors.IsNotFound(err) {
framework.Failf("Event %s should have been deleted. Found: %v", event.Name, latestEvent)
}
By(fmt.Sprintf("Verified that deletion succeeded"))
})
})
})
// verifyNsCascadingDeletion verifies that namespaces are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters map[string]*cluster, orphanDependents *bool) {
nsName := createNamespace(nsClient)
// Check subclusters if the namespace was created there.
By(fmt.Sprintf("Waiting for namespace %s to be created in all underlying clusters", nsName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters {
_, err := cluster.Core().Namespaces().Get(nsName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
return false, err
}
if err != nil {
return false, nil
}
}
return true, nil
})
framework.ExpectNoError(err, "Not all namespaces created")
By(fmt.Sprintf("Deleting namespace %s", nsName))
deleteAllTestNamespaces(orphanDependents, nsClient.List, nsClient.Delete)
By(fmt.Sprintf("Verifying namespaces %s in underlying clusters", nsName))
errMessages := []string{}
// namespace should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Core().Namespaces().Get(nsName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for namespace %s in cluster %s, expected namespace to exist", nsName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for namespace %s in cluster %s, got error: %v", nsName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
func createNamespace(nsClient clientset.NamespaceInterface) string {
ns := api_v1.Namespace{
ObjectMeta: api_v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName(namespacePrefix),
},
}
By(fmt.Sprintf("Creating namespace %s", ns.Name))
_, err := nsClient.Create(&ns)
framework.ExpectNoError(err, "Failed to create namespace %s", ns.Name)
By(fmt.Sprintf("Created namespace %s", ns.Name))
return ns.Name
}
func deleteAllTestNamespaces(orphanDependents *bool, lister func(api_v1.ListOptions) (*api_v1.NamespaceList, error), deleter func(string, *api_v1.DeleteOptions) error) {
list, err := lister(api_v1.ListOptions{})
if err != nil {
framework.Failf("Failed to get all namespaes: %v", err)
return
}
for _, namespace := range list.Items {
if strings.HasPrefix(namespace.Name, namespacePrefix) {
By(fmt.Sprintf("Deleting ns: %s, found by listing", namespace.Name))
err := deleter(namespace.Name, &api_v1.DeleteOptions{OrphanDependents: orphanDependents})
if err != nil {
framework.Failf("Failed to set %s for deletion: %v", namespace.Name, err)
}
}
}
waitForNoTestNamespaces(lister)
}
func waitForNoTestNamespaces(lister func(api_v1.ListOptions) (*api_v1.NamespaceList, error)) {
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
list, err := lister(api_v1.ListOptions{})
if err != nil {
return false, err
}
for _, namespace := range list.Items {
if strings.HasPrefix(namespace.Name, namespacePrefix) {
return false, nil
}
}
return true, nil
})
if err != nil {
framework.Failf("Namespaces not deleted: %v", err)
}
}

271
vendor/k8s.io/kubernetes/test/e2e/federated-secret.go generated vendored Normal file
View file

@ -0,0 +1,271 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
secretNamePrefix = "e2e-secret-test-"
FederatedSecretTimeout = 60 * time.Second
MaxRetries = 3
)
// Create/delete secret api objects
var _ = framework.KubeDescribe("Federation secrets [Feature:Federation]", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name
f := framework.NewDefaultFederatedFramework("federated-secret")
Describe("Secret objects", func() {
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
clusters = map[string]*cluster{}
registerClusters(clusters, UserAgentName, "", f)
})
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Delete all secrets.
nsName := f.FederationNamespace.Name
deleteAllSecretsOrFail(f.FederationClientset_1_5, nsName)
unregisterClusters(clusters, f)
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
secret := createSecretOrFail(f.FederationClientset_1_5, nsName)
// wait for secret shards being created
waitForSecretShardsOrFail(nsName, secret, clusters)
secret = updateSecretOrFail(f.FederationClientset_1_5, nsName, secret.Name)
waitForSecretShardsUpdatedOrFail(nsName, secret, clusters)
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := false
verifyCascadingDeletionForSecret(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that secrets were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := true
verifyCascadingDeletionForSecret(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that secrets were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
verifyCascadingDeletionForSecret(f.FederationClientset_1_5, clusters, nil, nsName)
By(fmt.Sprintf("Verified that secrets were not deleted from underlying clusters"))
})
})
})
// deleteAllSecretsOrFail deletes all secrets in the given namespace name.
func deleteAllSecretsOrFail(clientset *fedclientset.Clientset, nsName string) {
secretList, err := clientset.Core().Secrets(nsName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
orphanDependents := false
for _, secret := range secretList.Items {
deleteSecretOrFail(clientset, nsName, secret.Name, &orphanDependents)
}
}
// verifyCascadingDeletionForSecret verifies that secrets are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForSecret(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
secret := createSecretOrFail(clientset, nsName)
secretName := secret.Name
// Check subclusters if the secret was created there.
By(fmt.Sprintf("Waiting for secret %s to be created in all underlying clusters", secretName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters {
_, err := cluster.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return false, err
}
return false, nil
}
}
return true, nil
})
framework.ExpectNoError(err, "Not all secrets created")
By(fmt.Sprintf("Deleting secret %s", secretName))
deleteSecretOrFail(clientset, nsName, secretName, orphanDependents)
By(fmt.Sprintf("Verifying secrets %s in underlying clusters", secretName))
errMessages := []string{}
// secret should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for secret %s in cluster %s, expected secret to exist", secretName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for secret %s in cluster %s, got error: %v", secretName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
func createSecretOrFail(clientset *fedclientset.Clientset, nsName string) *v1.Secret {
if len(nsName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createSecretOrFail: namespace: %v", nsName))
}
secret := &v1.Secret{
ObjectMeta: v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName(secretNamePrefix),
Namespace: nsName,
},
}
By(fmt.Sprintf("Creating secret %q in namespace %q", secret.Name, nsName))
_, err := clientset.Core().Secrets(nsName).Create(secret)
framework.ExpectNoError(err, "Failed to create secret %s", secret.Name)
By(fmt.Sprintf("Successfully created federated secret %q in namespace %q", secret.Name, nsName))
return secret
}
func deleteSecretOrFail(clientset *fedclientset.Clientset, nsName string, secretName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting secret %q in namespace %q", secretName, nsName))
err := clientset.Core().Secrets(nsName).Delete(secretName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting secret %q in namespace %q", secretName, nsName)
// Wait for the secret to be deleted.
err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
framework.Failf("Error in deleting secret %s: %v", secretName, err)
}
}
func updateSecretOrFail(clientset *fedclientset.Clientset, nsName string, secretName string) *v1.Secret {
if clientset == nil || len(nsName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateSecretOrFail: clientset: %v, namespace: %v", clientset, nsName))
}
var newSecret *v1.Secret
for retryCount := 0; retryCount < MaxRetries; retryCount++ {
secret, err := clientset.Core().Secrets(nsName).Get(secretName, metav1.GetOptions{})
if err != nil {
framework.Failf("failed to get secret %q: %v", secretName, err)
}
// Update one of the data in the secret.
secret.Data = map[string][]byte{
"key": []byte("value"),
}
newSecret, err = clientset.Core().Secrets(nsName).Update(secret)
if err == nil {
return newSecret
}
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
framework.Failf("failed to update secret %q: %v", secretName, err)
}
}
framework.Failf("too many retries updating secret %q", secretName)
return newSecret
}
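// The loop above is the usual optimistic-concurrency pattern: on Conflict or
// ServerTimeout the secret is re-fetched and the update retried, up to
// MaxRetries attempts before failing the test.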
func waitForSecretShardsOrFail(nsName string, secret *v1.Secret, clusters map[string]*cluster) {
framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
for _, c := range clusters {
waitForSecretOrFail(c.Clientset, nsName, secret, true, FederatedSecretTimeout)
}
}
func waitForSecretOrFail(clientset *kubeclientset.Clientset, nsName string, secret *v1.Secret, present bool, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, nsName))
var clusterSecret *v1.Secret
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
// Assign to the outer clusterSecret; := here would shadow it and the
// equivalence check after the poll would never see the fetched secret.
var err error
clusterSecret, err = clientset.Core().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is absent", secret.Name, nsName))
return true, nil // Success
}
if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is present", secret.Name, nsName))
return true, nil // Success
}
By(fmt.Sprintf("Secret %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", secret.Name, nsName, clusterSecret != nil && err == nil, present, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster: Present=%v", secret.Name, nsName, present)
if present && clusterSecret != nil {
Expect(util.SecretEquivalent(*clusterSecret, *secret)).To(BeTrue(), "expected the cluster secret shard to be equivalent to the federated secret")
}
}
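// The poll predicate above reduces to this decision table; a self-contained
// sketch with a hypothetical name (found means the Get succeeded, gone means
// it returned NotFound).
func shardSettledSketch(wantPresent, found, gone bool) bool {
	if !wantPresent && gone {
		return true // we want it gone, and it's gone
	}
	return wantPresent && found // we want it present, and it is
}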
func waitForSecretShardsUpdatedOrFail(nsName string, secret *v1.Secret, clusters map[string]*cluster) {
framework.Logf("Waiting for secret %q in %d clusters", secret.Name, len(clusters))
for _, c := range clusters {
waitForSecretUpdateOrFail(c.Clientset, nsName, secret, FederatedSecretTimeout)
}
}
func waitForSecretUpdateOrFail(clientset *kubeclientset.Clientset, nsName string, secret *v1.Secret, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated secret shard of secret %q in namespace %q from cluster", secret.Name, nsName))
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
clusterSecret, err := clientset.Core().Secrets(nsName).Get(secret.Name, metav1.GetOptions{})
if err == nil { // We want it present, and the Get succeeded, so we're all good.
if util.SecretEquivalent(*clusterSecret, *secret) {
By(fmt.Sprintf("Success: shard of federated secret %q in namespace %q in cluster is updated", secret.Name, nsName))
return true, nil
} else {
By(fmt.Sprintf("Expected equal secrets. expected: %+v\nactual: %+v", *secret, *clusterSecret))
}
By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for secret being updated, trying again in %s (err=%v)", secret.Name, nsName, framework.Poll, err))
return false, nil
}
By(fmt.Sprintf("Secret %q in namespace %q in cluster, waiting for being updated, trying again in %s (err=%v)", secret.Name, nsName, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify secret %q in namespace %q in cluster", secret.Name, nsName)
}

vendor/k8s.io/kubernetes/test/e2e/federated-service.go generated vendored Normal file

@ -0,0 +1,375 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"reflect"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const (
FederatedServiceTimeout = 60 * time.Second
FederatedServiceName = "federated-service"
FederatedServicePodName = "federated-service-test-pod"
KubeDNSConfigMapName = "kube-dns"
KubeDNSConfigMapNamespace = "kube-system"
)
var FederatedServiceLabels = map[string]string{
"foo": "bar",
}
var _ = framework.KubeDescribe("[Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federated-service")
var clusters map[string]*cluster // All clusters, keyed by cluster name
var federationName string
var primaryClusterName string // The name of the "primary" cluster
var _ = Describe("Federated Services", func() {
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// TODO: Federation API server should be able to answer this.
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
federationName = DefaultFederationName
}
clusters = map[string]*cluster{}
primaryClusterName = registerClusters(clusters, UserAgentName, federationName, f)
})
AfterEach(func() {
unregisterClusters(clusters, f)
})
Describe("Service creation", func() {
var (
service *v1.Service
nsName string
)
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Placeholder
})
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
if service != nil {
By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", service.Name, nsName))
cleanupServiceShardsAndProviderResources(nsName, service, clusters)
service = nil
nsName = ""
}
})
It("should succeed", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName = f.FederationNamespace.Name
service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
By(fmt.Sprintf("Creation of service %q in namespace %q succeeded. Deleting service.", service.Name, nsName))
// Cleanup
err := f.FederationClientset_1_5.Services(nsName).Delete(service.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
By(fmt.Sprintf("Deletion of service %q in namespace %q succeeded.", service.Name, nsName))
})
It("should create matching services in underlying clusters", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName = f.FederationNamespace.Name
service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
defer func() { // Cleanup
By(fmt.Sprintf("Deleting service %q in namespace %q", service.Name, nsName))
err := f.FederationClientset_1_5.Services(nsName).Delete(service.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, nsName)
}()
waitForServiceShardsOrFail(nsName, service, clusters)
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := false
verifyCascadingDeletionForService(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that services were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := true
verifyCascadingDeletionForService(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that services were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
verifyCascadingDeletionForService(f.FederationClientset_1_5, clusters, nil, nsName)
By(fmt.Sprintf("Verified that services were not deleted from underlying clusters"))
})
})
var _ = Describe("DNS", func() {
var (
service *v1.Service
serviceShard *v1.Service
)
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
// Create kube-dns configmap for kube-dns to accept federation queries.
federationsDomainMap := os.Getenv("FEDERATIONS_DOMAIN_MAP")
if federationsDomainMap == "" {
framework.Failf("missing required env var FEDERATIONS_DOMAIN_MAP")
}
kubeDNSConfigMap := v1.ConfigMap{
ObjectMeta: v1.ObjectMeta{
Name: KubeDNSConfigMapName,
Namespace: KubeDNSConfigMapNamespace,
},
Data: map[string]string{
"federations": federationsDomainMap,
},
}
// Create this configmap in all clusters.
for clusterName, cluster := range clusters {
By(fmt.Sprintf("Creating kube dns config map in cluster: %s", clusterName))
_, err := cluster.Clientset.Core().ConfigMaps(KubeDNSConfigMapNamespace).Create(&kubeDNSConfigMap)
framework.ExpectNoError(err, fmt.Sprintf("Error in creating config map in cluster %s", clusterName))
}
createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)
service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
obj, err := api.Scheme.DeepCopy(service)
// Cloning shouldn't fail. On the off-chance it does, we
// should shallow copy service to serviceShard before
// failing. If we don't do this we will never really
// get a chance to clean up the underlying services
// when the cloner fails for reasons not in our
// control. For example, cloner bug. That will cause
// the resources to leak, which in turn causes the
// test project to run out of quota and the entire
// suite starts failing. So we must try as hard as
// possible to cleanup the underlying services. So
// if DeepCopy fails, we are going to try with shallow
// copy as a last resort.
if err != nil {
serviceCopy := *service
serviceShard = &serviceCopy
framework.ExpectNoError(err, fmt.Sprintf("Error in deep copying service %q", service.Name))
}
var ok bool
serviceShard, ok = obj.(*v1.Service)
// Same argument as above about using shallow copy
// as a last resort.
if !ok {
serviceCopy := *service
serviceShard = &serviceCopy
framework.ExpectNoError(err, fmt.Sprintf("Unexpected service object copied %T", obj))
}
waitForServiceShardsOrFail(nsName, serviceShard, clusters)
})
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
deleteBackendPodsOrFail(clusters, nsName)
if service != nil {
deleteServiceOrFail(f.FederationClientset_1_5, nsName, service.Name, nil)
service = nil
} else {
By("No service to delete. Service is nil")
}
if serviceShard != nil {
By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", serviceShard.Name, nsName))
cleanupServiceShardsAndProviderResources(nsName, serviceShard, clusters)
serviceShard = nil
} else {
By("No service shards to delete. `serviceShard` is nil")
}
// Delete the kube-dns config map from all clusters.
for clusterName, cluster := range clusters {
By(fmt.Sprintf("Deleting kube dns config map from cluster: %s", clusterName))
err := cluster.Clientset.Core().ConfigMaps(KubeDNSConfigMapNamespace).Delete(KubeDNSConfigMapName, nil)
framework.ExpectNoError(err, fmt.Sprintf("Error in deleting config map from cluster %s", clusterName))
}
})
It("should be able to discover a federated service", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
svcDNSNames := []string{
FederatedServiceName,
fmt.Sprintf("%s.%s", FederatedServiceName, nsName),
fmt.Sprintf("%s.%s.svc.cluster.local.", FederatedServiceName, nsName),
fmt.Sprintf("%s.%s.%s", FederatedServiceName, nsName, federationName),
fmt.Sprintf("%s.%s.%s.svc.cluster.local.", FederatedServiceName, nsName, federationName),
}
// TODO(mml): This could be much faster. We can launch all the test
// pods, perhaps in the BeforeEach, and then just poll until we get
// successes/failures from them all.
for i, DNSName := range svcDNSNames {
discoverService(f, DNSName, true, "federated-service-e2e-discovery-pod-"+strconv.Itoa(i))
}
By("Verified that DNS rules are working as expected")
By("Deleting the service to verify that DNS rules still work")
err := f.FederationClientset_1_5.Services(nsName).Delete(FederatedServiceName, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
// Service is deleted, unset the test block-global service variable.
service = nil
for i, DNSName := range svcDNSNames {
discoverService(f, DNSName, true, "federated-service-e2e-discovery-pod-"+strconv.Itoa(i))
}
By("Verified that deleting the service does not affect DNS records")
})
Context("non-local federated service", func() {
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Delete all the backend pods from the shard which is local to the discovery pod.
deleteOneBackendPodOrFail(clusters[primaryClusterName])
})
It("should be able to discover a non-local federated service", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
svcDNSNames := []string{
fmt.Sprintf("%s.%s.%s", FederatedServiceName, nsName, federationName),
fmt.Sprintf("%s.%s.%s.svc.cluster.local.", FederatedServiceName, nsName, federationName),
}
for i, name := range svcDNSNames {
discoverService(f, name, true, "federated-service-e2e-discovery-pod-"+strconv.Itoa(i))
}
})
// TODO(mml): This currently takes 9 minutes. Consider reducing the
// TTL and/or running the pods in parallel.
Context("[Slow] missing local service", func() {
It("should never find DNS entries for a missing local service", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
localSvcDNSNames := []string{
FederatedServiceName,
fmt.Sprintf("%s.%s", FederatedServiceName, nsName),
fmt.Sprintf("%s.%s.svc.cluster.local.", FederatedServiceName, nsName),
}
for i, name := range localSvcDNSNames {
discoverService(f, name, false, "federated-service-e2e-discovery-pod-"+strconv.Itoa(i))
}
})
})
})
})
})
})
// verifyCascadingDeletionForService verifies that services are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForService(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
service := createServiceOrFail(clientset, nsName, FederatedServiceName)
serviceName := service.Name
// Check the subclusters to see whether the service was created there.
By(fmt.Sprintf("Waiting for service %s to be created in all underlying clusters", serviceName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters {
_, err := cluster.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
return false, err
}
return false, nil
}
}
return true, nil
})
framework.ExpectNoError(err, "Not all services created")
By(fmt.Sprintf("Deleting service %s", serviceName))
deleteServiceOrFail(clientset, nsName, serviceName, orphanDependents)
By(fmt.Sprintf("Verifying services %s in underlying clusters", serviceName))
errMessages := []string{}
// service should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Core().Services(nsName).Get(serviceName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for service %s in cluster %s, expected service to exist", serviceName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for service %s in cluster %s, got error: %v", serviceName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
/*
equivalent returns true if the two services are equivalent. Fields which are expected to differ between
federated services and the underlying cluster services (e.g. ClusterIP, LoadBalancerIP etc) are ignored.
*/
func equivalent(federationService, clusterService v1.Service) bool {
// TODO: I think that we need a DeepCopy here to avoid clobbering our parameters.
clusterService.Spec.ClusterIP = federationService.Spec.ClusterIP
clusterService.Spec.ExternalIPs = federationService.Spec.ExternalIPs
clusterService.Spec.DeprecatedPublicIPs = federationService.Spec.DeprecatedPublicIPs
clusterService.Spec.LoadBalancerIP = federationService.Spec.LoadBalancerIP
clusterService.Spec.LoadBalancerSourceRanges = federationService.Spec.LoadBalancerSourceRanges
// N.B. We cannot iterate over the port objects directly, as their values
// only get copied and our updates will get lost.
for i := range clusterService.Spec.Ports {
clusterService.Spec.Ports[i].NodePort = federationService.Spec.Ports[i].NodePort
}
return reflect.DeepEqual(clusterService.Spec, federationService.Spec)
}
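// Per the TODO above, a hedged variant of equivalent that normalizes a deep
// copy instead of the parameter itself: although clusterService is passed by
// value, writes to Spec.Ports[i] go through a shared backing array and would
// clobber the caller's object. api.Scheme.DeepCopy is already used this way
// elsewhere in this file; the function name here is hypothetical.
func equivalentNonMutatingSketch(federationService, clusterService v1.Service) bool {
	obj, err := api.Scheme.DeepCopy(&clusterService)
	if err != nil {
		return false // cannot compare safely without a copy
	}
	copied := obj.(*v1.Service)
	copied.Spec.ClusterIP = federationService.Spec.ClusterIP
	copied.Spec.ExternalIPs = federationService.Spec.ExternalIPs
	copied.Spec.DeprecatedPublicIPs = federationService.Spec.DeprecatedPublicIPs
	copied.Spec.LoadBalancerIP = federationService.Spec.LoadBalancerIP
	copied.Spec.LoadBalancerSourceRanges = federationService.Spec.LoadBalancerSourceRanges
	if len(copied.Spec.Ports) != len(federationService.Spec.Ports) {
		return false // differing port counts can never be equivalent
	}
	for i := range copied.Spec.Ports {
		copied.Spec.Ports[i].NodePort = federationService.Spec.Ports[i].NodePort
	}
	return reflect.DeepEqual(copied.Spec, federationService.Spec)
}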


@ -0,0 +1,139 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/gomega"
)
// Create/delete cluster api objects
var _ = framework.KubeDescribe("Federation apiserver [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federation-cluster")
Describe("Cluster objects", func() {
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Delete registered clusters.
// This is so that if a test fails, it does not affect other tests.
clusterList, err := f.FederationClientset_1_5.Federation().Clusters().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, cluster := range clusterList.Items {
err := f.FederationClientset_1_5.Federation().Clusters().Delete(cluster.Name, &v1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
contexts := f.GetUnderlyingFederatedContexts()
framework.Logf("Creating %d cluster objects", len(contexts))
for _, context := range contexts {
createClusterObjectOrFail(f, &context)
}
framework.Logf("Checking that %d clusters are Ready", len(contexts))
for _, context := range contexts {
clusterIsReadyOrFail(f, &context)
}
framework.Logf("%d clusters are Ready", len(contexts))
// Verify that deletion works.
framework.Logf("Deleting %d clusters", len(contexts))
for _, context := range contexts {
framework.Logf("Deleting cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
err := f.FederationClientset_1_5.Federation().Clusters().Delete(context.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, fmt.Sprintf("unexpected error in deleting cluster %s: %+v", context.Name, err))
framework.Logf("Successfully deleted cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
}
// There should not be any remaining cluster.
framework.Logf("Verifying that zero clusters remain")
clusterList, err := f.FederationClientset_1_5.Federation().Clusters().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
if len(clusterList.Items) != 0 {
framework.Failf("there should not have been any remaining clusters. Found: %+v", clusterList)
}
framework.Logf("Verified that zero clusters remain")
})
})
Describe("Admission control", func() {
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
})
It("should not be able to create resources if namespace does not exist", func() {
framework.SkipUnlessFederated(f.ClientSet)
// Creating a service in a non-existing namespace should fail.
svcNamespace := "federation-admission-test-ns"
svcName := "myns"
clientset := f.FederationClientset_1_5
framework.Logf("Trying to create service %s in namespace %s, expect to get error", svcName, svcNamespace)
if _, err := clientset.Core().Services(svcNamespace).Create(newService(svcName, svcNamespace)); err == nil {
framework.Failf("Expected to get an error while creating a service in a non-existing namespace")
}
// Note: We have other tests that verify that we can create resources in existing namespaces, so we don't test it again here.
})
})
})
func newService(name, namespace string) *v1.Service {
return &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{
{
Port: 80,
},
},
},
}
}
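// A hedged usage sketch for newService, mirroring the admission control test
// above; the function name is hypothetical.
func expectServiceCreateRejectedSketch(clientset *federation_clientset.Clientset, namespace string) {
	if _, err := clientset.Core().Services(namespace).Create(newService("myns", namespace)); err == nil {
		framework.Failf("expected service creation in non-existing namespace %q to fail", namespace)
	}
}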
// Verify that the cluster is marked ready.
func isReady(clusterName string, clientset *federation_clientset.Clientset) error {
return wait.PollImmediate(time.Second, 5*time.Minute, func() (bool, error) {
c, err := clientset.Federation().Clusters().Get(clusterName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, condition := range c.Status.Conditions {
if condition.Type == federationapi.ClusterReady && condition.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}
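// A hedged sketch of how isReady might back the clusterIsReadyOrFail helper
// called in the test above; the function name is hypothetical.
func clusterIsReadySketch(clusterName string, clientset *federation_clientset.Clientset) {
	framework.ExpectNoError(isReady(clusterName, clientset), "cluster %q never reported Ready", clusterName)
}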

vendor/k8s.io/kubernetes/test/e2e/federation-authn.go generated vendored Normal file

@ -0,0 +1,114 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("[Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federation-apiserver-authn")
var _ = Describe("Federation API server authentication", func() {
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
})
It("should accept cluster resources when the client has right authentication credentials", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
svc := createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
deleteServiceOrFail(f.FederationClientset_1_5, nsName, svc.Name, nil)
})
It("should not accept cluster resources when the client has invalid authentication credentials", func() {
framework.SkipUnlessFederated(f.ClientSet)
contexts := f.GetUnderlyingFederatedContexts()
// `contexts` is obtained by calling
// `f.GetUnderlyingFederatedContexts()`. This function in turn
// checks that the contexts it returns do not include the
// federation API server context. So `contexts` is guaranteed to
// contain only the underlying Kubernetes cluster contexts.
fcs, err := invalidAuthFederationClientSet(contexts[0].User)
framework.ExpectNoError(err)
nsName := f.FederationNamespace.Name
svc, err := createService(fcs, nsName, FederatedServiceName)
Expect(errors.IsUnauthorized(err)).To(BeTrue())
if err == nil && svc != nil {
deleteServiceOrFail(fcs, nsName, svc.Name, nil)
}
})
It("should not accept cluster resources when the client has no authentication credentials", func() {
framework.SkipUnlessFederated(f.ClientSet)
fcs, err := invalidAuthFederationClientSet(nil)
framework.ExpectNoError(err)
nsName := f.FederationNamespace.Name
svc, err := createService(fcs, nsName, FederatedServiceName)
Expect(errors.IsUnauthorized(err)).To(BeTrue())
if err == nil && svc != nil {
deleteServiceOrFail(fcs, nsName, svc.Name, nil)
}
})
})
})
func invalidAuthFederationClientSet(user *framework.KubeUser) (*federation_clientset.Clientset, error) {
overrides := &clientcmd.ConfigOverrides{}
if user != nil {
overrides = &clientcmd.ConfigOverrides{
AuthInfo: clientcmdapi.AuthInfo{
Token: user.User.Token,
Username: user.User.Username,
Password: user.User.Password,
},
}
}
config, err := framework.LoadFederatedConfig(overrides)
if err != nil {
return nil, err
}
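	// LoadFederatedConfig may have populated credentials from the local
	// kubeconfig even with empty overrides, so clear them explicitly to
	// simulate a client with no credentials.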
if user == nil {
config.Password = ""
config.BearerToken = ""
config.Username = ""
}
c, err := federation_clientset.NewForConfig(config)
if err != nil {
return nil, fmt.Errorf("error creating federation clientset: %v", err)
}
return c, nil
}


@ -0,0 +1,289 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
)
const (
FederatedDaemonSetName = "federated-daemonset"
FederatedDaemonSetTimeout = 60 * time.Second
FederatedDaemonSetMaxRetries = 3
)
// Create/delete daemonset api objects
var _ = framework.KubeDescribe("Federation daemonsets [Feature:Federation]", func() {
var clusters map[string]*cluster // All clusters, keyed by cluster name
f := framework.NewDefaultFederatedFramework("federated-daemonset")
Describe("DaemonSet objects", func() {
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
clusters = map[string]*cluster{}
registerClusters(clusters, UserAgentName, "", f)
})
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Delete all daemonsets.
nsName := f.FederationNamespace.Name
deleteAllDaemonSetsOrFail(f.FederationClientset_1_5, nsName)
unregisterClusters(clusters, f)
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
daemonset := createDaemonSetOrFail(f.FederationClientset_1_5, nsName)
defer func() { // Cleanup
By(fmt.Sprintf("Deleting daemonset %q in namespace %q", daemonset.Name, nsName))
err := f.FederationClientset_1_5.Extensions().DaemonSets(nsName).Delete(daemonset.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting daemonset %q in namespace %q", daemonset.Name, nsName)
}()
// Wait for the daemonset shards to be created.
waitForDaemonSetShardsOrFail(nsName, daemonset, clusters)
daemonset = updateDaemonSetOrFail(f.FederationClientset_1_5, nsName)
waitForDaemonSetShardsUpdatedOrFail(nsName, daemonset, clusters)
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := false
verifyCascadingDeletionForDS(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that daemonsets were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := true
verifyCascadingDeletionForDS(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that daemonsets were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
verifyCascadingDeletionForDS(f.FederationClientset_1_5, clusters, nil, nsName)
By(fmt.Sprintf("Verified that daemonsets were not deleted from underlying clusters"))
})
})
})
// deleteAllDaemonSetsOrFail deletes all DaemonSets in the given namespace name.
func deleteAllDaemonSetsOrFail(clientset *fedclientset.Clientset, nsName string) {
daemonSetList, err := clientset.Extensions().DaemonSets(nsName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
orphanDependents := false
for _, daemonSet := range daemonSetList.Items {
deleteDaemonSetOrFail(clientset, nsName, daemonSet.Name, &orphanDependents)
}
}
// verifyCascadingDeletionForDS verifies that daemonsets are deleted from
// underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForDS(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
daemonset := createDaemonSetOrFail(clientset, nsName)
daemonsetName := daemonset.Name
// Check the subclusters to see whether the daemonset was created there.
By(fmt.Sprintf("Waiting for daemonset %s to be created in all underlying clusters", daemonsetName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters {
_, err := cluster.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
}
return true, nil
})
framework.ExpectNoError(err, "Not all daemonsets created")
By(fmt.Sprintf("Deleting daemonset %s", daemonsetName))
deleteDaemonSetOrFail(clientset, nsName, daemonsetName, orphanDependents)
By(fmt.Sprintf("Verifying daemonsets %s in underlying clusters", daemonsetName))
errMessages := []string{}
// daemon set should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for daemonset %s in cluster %s, expected daemonset to exist", daemonsetName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for daemonset %s in cluster %s, got error: %v", daemonsetName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
func createDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.DaemonSet {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createDaemonSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
daemonset := &v1beta1.DaemonSet{
ObjectMeta: v1.ObjectMeta{
Name: FederatedDaemonSetName,
Namespace: namespace,
},
Spec: v1beta1.DaemonSetSpec{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"aaa": "bbb"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "container1",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
},
},
}
By(fmt.Sprintf("Creating daemonset %q in namespace %q", daemonset.Name, namespace))
_, err := clientset.Extensions().DaemonSets(namespace).Create(daemonset)
framework.ExpectNoError(err, "Failed to create daemonset %s", daemonset.Name)
By(fmt.Sprintf("Successfully created federated daemonset %q in namespace %q", FederatedDaemonSetName, namespace))
return daemonset
}
func deleteDaemonSetOrFail(clientset *fedclientset.Clientset, nsName string, daemonsetName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting daemonset %q in namespace %q", daemonsetName, nsName))
err := clientset.Extensions().DaemonSets(nsName).Delete(daemonsetName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting daemonset %q in namespace %q", daemonsetName, nsName)
// Wait for the daemonset to be deleted.
err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().DaemonSets(nsName).Get(daemonsetName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
framework.Failf("Error in deleting daemonset %s: %v", daemonsetName, err)
}
}
func updateDaemonSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.DaemonSet {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateDaemonSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
var newDaemonSet *v1beta1.DaemonSet
for retryCount := 0; retryCount < FederatedDaemonSetMaxRetries; retryCount++ {
daemonset, err := clientset.Extensions().DaemonSets(namespace).Get(FederatedDaemonSetName, metav1.GetOptions{})
if err != nil {
framework.Failf("failed to get daemonset %q: %v", FederatedDaemonSetName, err)
}
// Update the daemonset's annotations.
daemonset.Annotations = map[string]string{"ccc": "ddd"}
newDaemonSet, err = clientset.Extensions().DaemonSets(namespace).Update(daemonset)
if err == nil {
return newDaemonSet
}
if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
framework.Failf("failed to update daemonset %q: %v", FederatedDaemonSetName, err)
}
}
framework.Failf("too many retries updating daemonset %q", FederatedDaemonSetName)
return newDaemonSet
}
func waitForDaemonSetShardsOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters map[string]*cluster) {
framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
for _, c := range clusters {
waitForDaemonSetOrFail(c.Clientset, namespace, daemonset, true, FederatedDaemonSetTimeout)
}
}
func waitForDaemonSetOrFail(clientset *kubeclientset.Clientset, namespace string, daemonset *v1beta1.DaemonSet, present bool, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated daemonset shard of daemonset %q in namespace %q from cluster", daemonset.Name, namespace))
var clusterDaemonSet *v1beta1.DaemonSet
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
// Assign to the outer clusterDaemonSet; := here would shadow it and the
// equivalence check after the poll would never see the fetched object.
var err error
clusterDaemonSet, err = clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name, metav1.GetOptions{})
if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is absent", daemonset.Name, namespace))
return true, nil // Success
}
if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is present", daemonset.Name, namespace))
return true, nil // Success
}
By(fmt.Sprintf("DaemonSet %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", daemonset.Name, namespace, clusterDaemonSet != nil && err == nil, present, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify daemonset %q in namespace %q in cluster: Present=%v", daemonset.Name, namespace, present)
if present && clusterDaemonSet != nil {
Expect(util.ObjectMetaAndSpecEquivalent(clusterDaemonSet, daemonset)).To(BeTrue(), "expected the cluster daemonset shard to match the federated daemonset")
}
}
func waitForDaemonSetShardsUpdatedOrFail(namespace string, daemonset *v1beta1.DaemonSet, clusters map[string]*cluster) {
framework.Logf("Waiting for daemonset %q in %d clusters", daemonset.Name, len(clusters))
for _, c := range clusters {
waitForDaemonSetUpdateOrFail(c.Clientset, namespace, daemonset, FederatedDaemonSetTimeout)
}
}
func waitForDaemonSetUpdateOrFail(clientset *kubeclientset.Clientset, namespace string, daemonset *v1beta1.DaemonSet, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated daemonset shard of daemonset %q in namespace %q from cluster", daemonset.Name, namespace))
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
clusterDaemonSet, err := clientset.Extensions().DaemonSets(namespace).Get(daemonset.Name, metav1.GetOptions{})
if err == nil { // We want it present, and the Get succeeded, so we're all good.
if util.ObjectMetaAndSpecEquivalent(clusterDaemonSet, daemonset) {
By(fmt.Sprintf("Success: shard of federated daemonset %q in namespace %q in cluster is updated", daemonset.Name, namespace))
return true, nil
} else {
By(fmt.Sprintf("Expected equal daemonsets. expected: %+v\nactual: %+v", *daemonset, *clusterDaemonSet))
}
By(fmt.Sprintf("DaemonSet %q in namespace %q in cluster, waiting for daemonset being updated, trying again in %s (err=%v)", daemonset.Name, namespace, framework.Poll, err))
return false, nil
}
By(fmt.Sprintf("DaemonSet %q in namespace %q in cluster, waiting for being updated, trying again in %s (err=%v)", daemonset.Name, namespace, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify daemonset %q in namespace %q in cluster", daemonset.Name, namespace)
}


@ -0,0 +1,305 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
FederationDeploymentName = "federation-deployment"
FederatedDeploymentTimeout = 120 * time.Second
)
// Create/delete deployment api objects
var _ = framework.KubeDescribe("Federation deployments [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federation-deployment")
Describe("Deployment objects", func() {
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Delete all deployments.
nsName := f.FederationNamespace.Name
deleteAllDeploymentsOrFail(f.FederationClientset_1_5, nsName)
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
deployment := createDeploymentOrFail(f.FederationClientset_1_5, nsName)
By(fmt.Sprintf("Creation of deployment %q in namespace %q succeeded. Deleting deployment.", deployment.Name, nsName))
// Cleanup
err := f.FederationClientset_1_5.Extensions().Deployments(nsName).Delete(deployment.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting deployment %q in namespace %q", deployment.Name, deployment.Namespace)
By(fmt.Sprintf("Deletion of deployment %q in namespace %q succeeded.", deployment.Name, nsName))
})
})
// e2e cases for federated deployment controller
Describe("Federated Deployment", func() {
var (
clusters map[string]*cluster
federationName string
)
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
federationName = DefaultFederationName
}
clusters = map[string]*cluster{}
registerClusters(clusters, UserAgentName, federationName, f)
})
AfterEach(func() {
nsName := f.FederationNamespace.Name
deleteAllDeploymentsOrFail(f.FederationClientset_1_5, nsName)
unregisterClusters(clusters, f)
})
It("should create and update matching deployments in underling clusters", func() {
nsName := f.FederationNamespace.Name
dep := createDeploymentOrFail(f.FederationClientset_1_5, nsName)
defer func() {
// Cleanup: deletion of deployments is not supported for underlying clusters.
By(fmt.Sprintf("Preparing deployment %q/%q for deletion by setting replicas to zero", nsName, dep.Name))
replicas := int32(0)
dep.Spec.Replicas = &replicas
f.FederationClientset_1_5.Deployments(nsName).Update(dep)
waitForDeploymentOrFail(f.FederationClientset_1_5, nsName, dep.Name, clusters)
f.FederationClientset_1_5.Deployments(nsName).Delete(dep.Name, &v1.DeleteOptions{})
}()
waitForDeploymentOrFail(f.FederationClientset_1_5, nsName, dep.Name, clusters)
By(fmt.Sprintf("Successfuly created and synced deployment %q/%q to clusters", nsName, dep.Name))
updateDeploymentOrFail(f.FederationClientset_1_5, nsName)
waitForDeploymentOrFail(f.FederationClientset_1_5, nsName, dep.Name, clusters)
By(fmt.Sprintf("Successfuly updated and synced deployment %q/%q to clusters", nsName, dep.Name))
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := false
verifyCascadingDeletionForDeployment(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that deployments were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := true
verifyCascadingDeletionForDeployment(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that deployments were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
verifyCascadingDeletionForDeployment(f.FederationClientset_1_5, clusters, nil, nsName)
By(fmt.Sprintf("Verified that deployments were not deleted from underlying clusters"))
})
})
})
// deleteAllDeploymentsOrFail deletes all deployments in the given namespace name.
func deleteAllDeploymentsOrFail(clientset *fedclientset.Clientset, nsName string) {
deploymentList, err := clientset.Extensions().Deployments(nsName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
orphanDependents := false
for _, deployment := range deploymentList.Items {
deleteDeploymentOrFail(clientset, nsName, deployment.Name, &orphanDependents)
}
}
// verifyCascadingDeletionForDeployment verifies that deployments are deleted
// from underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForDeployment(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
deployment := createDeploymentOrFail(clientset, nsName)
deploymentName := deployment.Name
// Check the subclusters to see whether the deployment was created there.
By(fmt.Sprintf("Waiting for deployment %s to be created in all underlying clusters", deploymentName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters {
_, err := cluster.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
}
return true, nil
})
framework.ExpectNoError(err, "Not all deployments created")
By(fmt.Sprintf("Deleting deployment %s", deploymentName))
deleteDeploymentOrFail(clientset, nsName, deploymentName, orphanDependents)
By(fmt.Sprintf("Verifying deployments %s in underlying clusters", deploymentName))
errMessages := []string{}
// deployment should be present in underlying clusters unless orphanDependents is false.
shouldExist := orphanDependents == nil || *orphanDependents
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if shouldExist && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for deployment %s in cluster %s, expected deployment to exist", deploymentName, clusterName))
} else if !shouldExist && !errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for deployment %s in cluster %s, got error: %v", deploymentName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
func waitForDeploymentOrFail(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) {
err := waitForDeployment(c, namespace, deploymentName, clusters)
framework.ExpectNoError(err, "Failed to verify deployment %q/%q, err: %v", namespace, deploymentName, err)
}
func waitForDeployment(c *fedclientset.Clientset, namespace string, deploymentName string, clusters map[string]*cluster) error {
err := wait.Poll(10*time.Second, FederatedDeploymentTimeout, func() (bool, error) {
fdep, err := c.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil {
return false, err
}
specReplicas, statusReplicas := int32(0), int32(0)
for _, cluster := range clusters {
dep, err := cluster.Deployments(namespace).Get(deploymentName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
By(fmt.Sprintf("Failed getting deployment: %q/%q/%q, err: %v", cluster.name, namespace, deploymentName, err))
return false, err
}
if err == nil {
if !verifyDeployment(fdep, dep) {
By(fmt.Sprintf("Deployment meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, fdep, dep))
return false, nil
}
specReplicas += *dep.Spec.Replicas
statusReplicas += dep.Status.Replicas
}
}
if statusReplicas == fdep.Status.Replicas && specReplicas >= *fdep.Spec.Replicas {
return true, nil
}
By(fmt.Sprintf("Replicas not match, federation replicas: %v/%v, clusters replicas: %v/%v\n", *fdep.Spec.Replicas, fdep.Status.Replicas, specReplicas, statusReplicas))
return false, nil
})
return err
}
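// The convergence condition above reduces to this predicate; a
// self-contained sketch over hypothetical per-cluster replica counts.
func deploymentConvergedSketch(fedSpec, fedStatus int32, clusterSpec, clusterStatus []int32) bool {
	var spec, status int32
	for _, r := range clusterSpec {
		spec += r
	}
	for _, r := range clusterStatus {
		status += r
	}
	// Every replica the federation reports is accounted for in the clusters,
	// and the clusters have scheduled at least the desired total.
	return status == fedStatus && spec >= fedSpec
}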
func verifyDeployment(fedDeployment, localDeployment *v1beta1.Deployment) bool {
localDeployment = fedutil.DeepCopyDeployment(localDeployment)
localDeployment.Spec.Replicas = fedDeployment.Spec.Replicas
return fedutil.DeploymentEquivalent(fedDeployment, localDeployment)
}
func createDeploymentOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.Deployment {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createDeploymentOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Creating federation deployment %q in namespace %q", FederationDeploymentName, namespace))
deployment := newDeploymentForFed(namespace, FederationDeploymentName, 5)
_, err := clientset.Extensions().Deployments(namespace).Create(deployment)
framework.ExpectNoError(err, "Creating deployment %q in namespace %q", deployment.Name, namespace)
By(fmt.Sprintf("Successfully created federation deployment %q in namespace %q", FederationDeploymentName, namespace))
return deployment
}
func updateDeploymentOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.Deployment {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateDeploymentOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Updating federation deployment %q in namespace %q", FederationDeploymentName, namespace))
deployment := newDeploymentForFed(namespace, FederationDeploymentName, 15)
newDeployment, err := clientset.Deployments(namespace).Update(deployment)
framework.ExpectNoError(err, "Updating deployment %q in namespace %q", deployment.Name, namespace)
By(fmt.Sprintf("Successfully updated federation deployment %q in namespace %q", FederationDeploymentName, namespace))
return newDeployment
}
func deleteDeploymentOrFail(clientset *fedclientset.Clientset, nsName string, deploymentName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting deployment %q in namespace %q", deploymentName, nsName))
err := clientset.Extensions().Deployments(nsName).Delete(deploymentName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting deployment %q in namespace %q", deploymentName, nsName)
// Wait for the deployment to be deleted.
err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().Deployments(nsName).Get(deploymentName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
framework.Failf("Error in deleting deployment %s: %v", deploymentName, err)
}
}
func newDeploymentForFed(namespace string, name string, replicas int32) *v1beta1.Deployment {
return &v1beta1.Deployment{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1beta1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "myrs"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": "myrs"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
},
},
},
},
},
}
}
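// A hedged sketch of the scale-to-zero cleanup used in the Federated
// Deployment test above (shard deletion was not supported, so the test
// scales the federated deployment to zero first); the name is hypothetical.
func scaleDeploymentToZeroSketch(clientset *fedclientset.Clientset, namespace, name string) {
	dep, err := clientset.Extensions().Deployments(namespace).Get(name, metav1.GetOptions{})
	framework.ExpectNoError(err, "getting deployment %q for scale-down", name)
	zero := int32(0)
	dep.Spec.Replicas = &zero
	_, err = clientset.Extensions().Deployments(namespace).Update(dep)
	framework.ExpectNoError(err, "scaling deployment %q to zero replicas", name)
}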

vendor/k8s.io/kubernetes/test/e2e/federation-event.go generated vendored Normal file

@ -0,0 +1,97 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/gomega"
)
const (
FederationEventName = "federation-event"
)
// Create/delete event api objects.
var _ = framework.KubeDescribe("Federation events [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federation-event")
Describe("Event objects", func() {
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
// Delete registered events.
eventList, err := f.FederationClientset_1_5.Core().Events(nsName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, event := range eventList.Items {
err := f.FederationClientset_1_5.Core().Events(nsName).Delete(event.Name, &v1.DeleteOptions{})
Expect(err).NotTo(HaveOccurred())
}
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
event := createEventOrFail(f.FederationClientset_1_5, nsName)
By(fmt.Sprintf("Creation of event %q in namespace %q succeeded. Deleting event.", event.Name, nsName))
// Cleanup
err := f.FederationClientset_1_5.Core().Events(nsName).Delete(event.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting event %q in namespace %q", event.Name, event.Namespace)
By(fmt.Sprintf("Deletion of event %q in namespace %q succeeded.", event.Name, nsName))
})
})
})
func createEventOrFail(clientset *federation_clientset.Clientset, namespace string) *v1.Event {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createEventOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Creating federated event %q in namespace %q", FederationEventName, namespace))
event := &v1.Event{
ObjectMeta: v1.ObjectMeta{
Name: FederationEventName,
Namespace: namespace,
},
InvolvedObject: v1.ObjectReference{
Kind: "Pod",
Name: "pod-name",
Namespace: namespace,
UID: "C934D34AFB20242",
APIVersion: "version",
},
Source: v1.EventSource{
Component: "kubelet",
Host: "kublet.node1",
},
Count: 1,
Type: v1.EventTypeNormal,
}
_, err := clientset.Core().Events(namespace).Create(event)
framework.ExpectNoError(err, "Creating event %q in namespace %q", event.Name, namespace)
By(fmt.Sprintf("Successfully created federated event %q in namespace %q", FederationEventName, namespace))
return event
}


@ -0,0 +1,306 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os"
"strings"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
fedutil "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/test/e2e/framework"
"reflect"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
)
const (
FederationReplicaSetName = "federation-replicaset"
FederatedReplicaSetTimeout = 120 * time.Second
)
// Create/delete replicaset api objects
var _ = framework.KubeDescribe("Federation replicasets [Feature:Federation]", func() {
f := framework.NewDefaultFederatedFramework("federation-replicaset")
Describe("ReplicaSet objects", func() {
AfterEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
// Delete all replicasets.
nsName := f.FederationNamespace.Name
deleteAllReplicaSetsOrFail(f.FederationClientset_1_5, nsName)
})
It("should be created and deleted successfully", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
replicaset := createReplicaSetOrFail(f.FederationClientset_1_5, nsName)
By(fmt.Sprintf("Creation of replicaset %q in namespace %q succeeded. Deleting replicaset.", replicaset.Name, nsName))
// Cleanup
err := f.FederationClientset_1_5.Extensions().ReplicaSets(nsName).Delete(replicaset.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting replicaset %q in namespace %q", replicaset.Name, replicaset.Namespace)
By(fmt.Sprintf("Deletion of replicaset %q in namespace %q succeeded.", replicaset.Name, nsName))
})
})
// e2e cases for federated replicaset controller
Describe("Federated ReplicaSet", func() {
var (
clusters map[string]*cluster
federationName string
)
BeforeEach(func() {
framework.SkipUnlessFederated(f.ClientSet)
if federationName = os.Getenv("FEDERATION_NAME"); federationName == "" {
federationName = DefaultFederationName
}
clusters = map[string]*cluster{}
registerClusters(clusters, UserAgentName, federationName, f)
})
AfterEach(func() {
// Delete all replicasets.
nsName := f.FederationNamespace.Name
deleteAllReplicaSetsOrFail(f.FederationClientset_1_5, nsName)
unregisterClusters(clusters, f)
})
It("should create and update matching replicasets in underling clusters", func() {
nsName := f.FederationNamespace.Name
rs := createReplicaSetOrFail(f.FederationClientset_1_5, nsName)
defer func() {
// Cleanup: deletion of replicasets is not supported for underlying clusters.
By(fmt.Sprintf("Preparing replicaset %q/%q for deletion by setting replicas to zero", nsName, rs.Name))
replicas := int32(0)
rs.Spec.Replicas = &replicas
f.FederationClientset_1_5.ReplicaSets(nsName).Update(rs)
waitForReplicaSetOrFail(f.FederationClientset_1_5, nsName, rs.Name, clusters)
f.FederationClientset_1_5.ReplicaSets(nsName).Delete(rs.Name, &v1.DeleteOptions{})
}()
waitForReplicaSetOrFail(f.FederationClientset_1_5, nsName, rs.Name, clusters)
By(fmt.Sprintf("Successfuly created and synced replicaset %q/%q to clusters", nsName, rs.Name))
updateReplicaSetOrFail(f.FederationClientset_1_5, nsName)
waitForReplicaSetOrFail(f.FederationClientset_1_5, nsName, rs.Name, clusters)
By(fmt.Sprintf("Successfuly updated and synced replicaset %q/%q to clusters", nsName, rs.Name))
})
It("should be deleted from underlying clusters when OrphanDependents is false", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := false
verifyCascadingDeletionForReplicaSet(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that replica sets were deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is true", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
orphanDependents := true
verifyCascadingDeletionForReplicaSet(f.FederationClientset_1_5, clusters, &orphanDependents, nsName)
By(fmt.Sprintf("Verified that replica sets were not deleted from underlying clusters"))
})
It("should not be deleted from underlying clusters when OrphanDependents is nil", func() {
framework.SkipUnlessFederated(f.ClientSet)
nsName := f.FederationNamespace.Name
verifyCascadingDeletionForReplicaSet(f.FederationClientset_1_5, clusters, nil, nsName)
By(fmt.Sprintf("Verified that replica sets were not deleted from underlying clusters"))
})
})
})
// deleteAllReplicaSetsOrFail deletes all replicasets in the given namespace name.
func deleteAllReplicaSetsOrFail(clientset *fedclientset.Clientset, nsName string) {
replicasetList, err := clientset.Extensions().ReplicaSets(nsName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
orphanDependents := false
for _, replicaset := range replicasetList.Items {
deleteReplicaSetOrFail(clientset, nsName, replicaset.Name, &orphanDependents)
}
}
// verifyCascadingDeletionForReplicaSet verifies that replicaSets are deleted
// from underlying clusters when orphan dependents is false and they are not
// deleted when orphan dependents is true.
func verifyCascadingDeletionForReplicaSet(clientset *fedclientset.Clientset, clusters map[string]*cluster, orphanDependents *bool, nsName string) {
replicaSet := createReplicaSetOrFail(clientset, nsName)
replicaSetName := replicaSet.Name
// Check subclusters if the replicaSet was created there.
By(fmt.Sprintf("Waiting for replica sets %s to be created in all underlying clusters", replicaSetName))
err := wait.Poll(5*time.Second, 2*time.Minute, func() (bool, error) {
for _, cluster := range clusters {
_, err := cluster.Extensions().ReplicaSets(nsName).Get(replicaSetName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return false, nil
}
if err != nil {
return false, err
}
}
return true, nil
})
framework.ExpectNoError(err, "Not all replica sets created")
By(fmt.Sprintf("Deleting replica set %s", replicaSetName))
deleteReplicaSetOrFail(clientset, nsName, replicaSetName, orphanDependents)
By(fmt.Sprintf("Verifying replica sets %s in underlying clusters", replicaSetName))
errMessages := []string{}
for clusterName, clusterClientset := range clusters {
_, err := clusterClientset.Extensions().ReplicaSets(nsName).Get(replicaSetName, metav1.GetOptions{})
if (orphanDependents == nil || *orphanDependents == true) && errors.IsNotFound(err) {
errMessages = append(errMessages, fmt.Sprintf("unexpected NotFound error for replica set %s in cluster %s, expected replica set to exist", replicaSetName, clusterName))
} else if (orphanDependents != nil && *orphanDependents == false) && (err == nil || !errors.IsNotFound(err)) {
errMessages = append(errMessages, fmt.Sprintf("expected NotFound error for replica set %s in cluster %s, got error: %v", replicaSetName, clusterName, err))
}
}
if len(errMessages) != 0 {
framework.Failf("%s", strings.Join(errMessages, "; "))
}
}
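// Illustrative sketch (not part of the upstream file): the three
// OrphanDependents cases exercised by the specs above, condensed into one
// hypothetical helper. nsName is assumed to be an existing federation namespace.
func exampleCascadingDeletionCases(clientset *fedclientset.Clientset, clusters map[string]*cluster, nsName string) {
	cascade := false
	orphan := true
	// OrphanDependents=false: shards are removed from the underlying clusters.
	verifyCascadingDeletionForReplicaSet(clientset, clusters, &cascade, nsName)
	// OrphanDependents=true: shards are left behind in the underlying clusters.
	verifyCascadingDeletionForReplicaSet(clientset, clusters, &orphan, nsName)
	// OrphanDependents=nil: behaves like true, i.e. shards are orphaned.
	verifyCascadingDeletionForReplicaSet(clientset, clusters, nil, nsName)
}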
func waitForReplicaSetOrFail(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster) {
err := waitForReplicaSet(c, namespace, replicaSetName, clusters)
framework.ExpectNoError(err, "Failed to verify replica set %q/%q, err: %v", namespace, replicaSetName, err)
}
func waitForReplicaSet(c *fedclientset.Clientset, namespace string, replicaSetName string, clusters map[string]*cluster) error {
err := wait.Poll(10*time.Second, FederatedReplicaSetTimeout, func() (bool, error) {
frs, err := c.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
if err != nil {
return false, err
}
specReplicas, statusReplicas := int32(0), int32(0)
for _, cluster := range clusters {
rs, err := cluster.ReplicaSets(namespace).Get(replicaSetName, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
By(fmt.Sprintf("Failed getting replicaset: %q/%q/%q, err: %v", cluster.name, namespace, replicaSetName, err))
return false, err
}
if err == nil {
if !equivalentReplicaSet(frs, rs) {
By(fmt.Sprintf("Replicaset meta or spec not match for cluster %q:\n federation: %v\n cluster: %v", cluster.name, frs, rs))
return false, nil
}
specReplicas += *rs.Spec.Replicas
statusReplicas += rs.Status.Replicas
}
}
if statusReplicas == frs.Status.Replicas && specReplicas >= *frs.Spec.Replicas {
return true, nil
}
By(fmt.Sprintf("Replicas not match, federation replicas: %v/%v, clusters replicas: %v/%v\n", *frs.Spec.Replicas, frs.Status.Replicas, specReplicas, statusReplicas))
return false, nil
})
return err
}
func equivalentReplicaSet(fedReplicaSet, localReplicaSet *v1beta1.ReplicaSet) bool {
localReplicaSetSpec := localReplicaSet.Spec
localReplicaSetSpec.Replicas = fedReplicaSet.Spec.Replicas
return fedutil.ObjectMetaEquivalent(fedReplicaSet.ObjectMeta, localReplicaSet.ObjectMeta) &&
reflect.DeepEqual(fedReplicaSet.Spec, localReplicaSetSpec)
}
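// Illustrative sketch (not part of the upstream file): equivalentReplicaSet
// deliberately ignores per-cluster replica counts, because the federation
// controller splits the federated Spec.Replicas across clusters; waitForReplicaSet
// instead checks that the per-cluster counts sum back up to the federated value.
// The hypothetical helper below shows two objects differing only in Replicas
// comparing as equivalent.
func exampleEquivalenceIgnoresReplicas() {
	fed := newReplicaSet("demo-ns", "demo-rs", 10)
	local := newReplicaSet("demo-ns", "demo-rs", 3)
	if !equivalentReplicaSet(fed, local) {
		framework.Failf("expected replica sets differing only in replica count to be equivalent")
	}
}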
func createReplicaSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.ReplicaSet {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to createReplicaSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Creating federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
replicaset := newReplicaSet(namespace, FederationReplicaSetName, 5)
_, err := clientset.Extensions().ReplicaSets(namespace).Create(replicaset)
framework.ExpectNoError(err, "Creating replicaset %q in namespace %q", replicaset.Name, namespace)
By(fmt.Sprintf("Successfully created federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
return replicaset
}
func deleteReplicaSetOrFail(clientset *fedclientset.Clientset, nsName string, replicaSetName string, orphanDependents *bool) {
By(fmt.Sprintf("Deleting replica set %q in namespace %q", replicaSetName, nsName))
err := clientset.Extensions().ReplicaSets(nsName).Delete(replicaSetName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting replica set %q in namespace %q", replicaSetName, nsName)
// Wait for the replicaSet to be deleted.
err = wait.Poll(5*time.Second, wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientset.Extensions().ReplicaSets(nsName).Get(replicaSetName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
framework.Failf("Error in deleting replica set %s: %v", replicaSetName, err)
}
}
func updateReplicaSetOrFail(clientset *fedclientset.Clientset, namespace string) *v1beta1.ReplicaSet {
if clientset == nil || len(namespace) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to updateReplicaSetOrFail: clientset: %v, namespace: %v", clientset, namespace))
}
By(fmt.Sprintf("Updating federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
replicaset := newReplicaSet(namespace, FederationReplicaSetName, 15)
newRs, err := clientset.ReplicaSets(namespace).Update(replicaset)
framework.ExpectNoError(err, "Updating replicaset %q in namespace %q", replicaset.Name, namespace)
By(fmt.Sprintf("Successfully updated federation replicaset %q in namespace %q", FederationReplicaSetName, namespace))
return newRs
}
func newReplicaSet(namespace string, name string, replicas int32) *v1beta1.ReplicaSet {
return &v1beta1.ReplicaSet{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1beta1.ReplicaSetSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"name": "myrs"},
},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": "myrs"},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
},
},
},
},
},
}
}

527
vendor/k8s.io/kubernetes/test/e2e/federation-util.go generated vendored Normal file
View file

@ -0,0 +1,527 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
federationapi "k8s.io/kubernetes/federation/apis/federation/v1beta1"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_clientset"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
KubeAPIQPS float32 = 20.0
KubeAPIBurst = 30
DefaultFederationName = "federation"
UserAgentName = "federation-e2e"
// We use this to decide how long to wait for our DNS probes to succeed.
DNSTTL = 180 * time.Second // TODO: make k8s.io/kubernetes/federation/pkg/federation-controller/service.minDnsTtl exported, and import it here.
)
/*
cluster keeps track of the assorted objects and state related to each cluster
in the federation
*/
type cluster struct {
name string
*kubeclientset.Clientset
namespaceCreated bool // Did we need to create a new namespace in this cluster? If so, we should delete it.
backendPod *v1.Pod // The backend pod, if one's been created.
}
func createClusterObjectOrFail(f *framework.Framework, context *framework.E2EContext) {
framework.Logf("Creating cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
cluster := federationapi.Cluster{
ObjectMeta: v1.ObjectMeta{
Name: context.Name,
},
Spec: federationapi.ClusterSpec{
ServerAddressByClientCIDRs: []federationapi.ServerAddressByClientCIDR{
{
ClientCIDR: "0.0.0.0/0",
ServerAddress: context.Cluster.Cluster.Server,
},
},
SecretRef: &v1.LocalObjectReference{
// Note: Name must correlate with federation build script secret name,
// which currently matches the cluster name.
// See federation/cluster/common.sh:132
Name: context.Name,
},
},
}
_, err := f.FederationClientset_1_5.Federation().Clusters().Create(&cluster)
framework.ExpectNoError(err, fmt.Sprintf("creating cluster: %+v", err))
framework.Logf("Successfully created cluster object: %s (%s, secret: %s)", context.Name, context.Cluster.Cluster.Server, context.Name)
}
func clusterIsReadyOrFail(f *framework.Framework, context *framework.E2EContext) {
c, err := f.FederationClientset_1_5.Federation().Clusters().Get(context.Name, metav1.GetOptions{})
framework.ExpectNoError(err, fmt.Sprintf("get cluster: %+v", err))
if c.ObjectMeta.Name != context.Name {
framework.Failf("cluster name does not match input context: actual=%+v, expected=%+v", c, context)
}
err = isReady(context.Name, f.FederationClientset_1_5)
framework.ExpectNoError(err, fmt.Sprintf("unexpected error in verifying if cluster %s is ready: %+v", context.Name, err))
framework.Logf("Cluster %s is Ready", context.Name)
}
// waitForAllClustersReady waits for all clusters defined in the e2e context to be created,
// returning the ClusterList once the number of listed clusters equals clusterCount.
func waitForAllClustersReady(f *framework.Framework, clusterCount int) *federationapi.ClusterList {
var clusterList *federationapi.ClusterList
if err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
var err error
clusterList, err = f.FederationClientset_1_5.Federation().Clusters().List(v1.ListOptions{})
if err != nil {
return false, err
}
framework.Logf("%d clusters registered, waiting for %d", len(clusterList.Items), clusterCount)
if len(clusterList.Items) == clusterCount {
return true, nil
}
return false, nil
}); err != nil {
framework.Failf("Failed to list registered clusters: %+v", err)
}
return clusterList
}
func createClientsetForCluster(c federationapi.Cluster, i int, userAgentName string) *kubeclientset.Clientset {
kubecfg, err := clientcmd.LoadFromFile(framework.TestContext.KubeConfig)
framework.ExpectNoError(err, "error loading KubeConfig: %v", err)
cfgOverride := &clientcmd.ConfigOverrides{
ClusterInfo: clientcmdapi.Cluster{
Server: c.Spec.ServerAddressByClientCIDRs[0].ServerAddress,
},
}
ccfg := clientcmd.NewNonInteractiveClientConfig(*kubecfg, c.Name, cfgOverride, clientcmd.NewDefaultClientConfigLoadingRules())
cfg, err := ccfg.ClientConfig()
framework.ExpectNoError(err, "Error creating client config in cluster #%d (%q)", i, c.Name)
cfg.QPS = KubeAPIQPS
cfg.Burst = KubeAPIBurst
return kubeclientset.NewForConfigOrDie(restclient.AddUserAgent(cfg, userAgentName))
}
// Creates the federation namespace in all underlying clusters.
func createNamespaceInClusters(clusters map[string]*cluster, f *framework.Framework) {
nsName := f.FederationNamespace.Name
for name, c := range clusters {
// The e2e Framework created the required namespace in federation control plane, but we need to create it in all the others, if it doesn't yet exist.
// TODO(nikhiljindal): remove this once we have the namespace controller working as expected.
if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); errors.IsNotFound(err) {
ns := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: nsName,
},
}
_, err := c.Clientset.Core().Namespaces().Create(ns)
if err == nil {
c.namespaceCreated = true
}
framework.ExpectNoError(err, "Couldn't create the namespace %s in cluster %q", nsName, name)
framework.Logf("Namespace %s created in cluster %q", nsName, name)
} else if err != nil {
framework.Logf("Couldn't create the namespace %s in cluster %q: %v", nsName, name, err)
}
}
}
// Unregisters the given clusters from federation control plane.
// Also deletes the federation namespace from each cluster.
func unregisterClusters(clusters map[string]*cluster, f *framework.Framework) {
nsName := f.FederationNamespace.Name
for name, c := range clusters {
if c.namespaceCreated {
if _, err := c.Clientset.Core().Namespaces().Get(nsName, metav1.GetOptions{}); !errors.IsNotFound(err) {
err := c.Clientset.Core().Namespaces().Delete(nsName, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Couldn't delete the namespace %s in cluster %q: %v", nsName, name, err)
}
framework.Logf("Namespace %s deleted in cluster %q", nsName, name)
}
}
// Delete the registered clusters in the federation API server.
clusterList, err := f.FederationClientset_1_5.Federation().Clusters().List(v1.ListOptions{})
framework.ExpectNoError(err, "Error listing clusters")
for _, cluster := range clusterList.Items {
err := f.FederationClientset_1_5.Federation().Clusters().Delete(cluster.Name, &v1.DeleteOptions{})
framework.ExpectNoError(err, "Error deleting cluster %q", cluster.Name)
}
}
// cannot be moved to util, as By and Expect must be called from within a Ginkgo test unit
func registerClusters(clusters map[string]*cluster, userAgentName, federationName string, f *framework.Framework) string {
contexts := f.GetUnderlyingFederatedContexts()
for _, context := range contexts {
createClusterObjectOrFail(f, &context)
}
By("Obtaining a list of all the clusters")
clusterList := waitForAllClustersReady(f, len(contexts))
framework.Logf("Checking that %d clusters are Ready", len(contexts))
for _, context := range contexts {
clusterIsReadyOrFail(f, &context)
}
framework.Logf("%d clusters are Ready", len(contexts))
primaryClusterName := clusterList.Items[0].Name
By(fmt.Sprintf("Labeling %q as the first cluster", primaryClusterName))
for i, c := range clusterList.Items {
framework.Logf("Creating a clientset for the cluster %s", c.Name)
Expect(framework.TestContext.KubeConfig).ToNot(Equal(""), "KubeConfig must be specified to load clusters' client config")
clusters[c.Name] = &cluster{c.Name, createClientsetForCluster(c, i, userAgentName), false, nil}
}
createNamespaceInClusters(clusters, f)
return primaryClusterName
}
/*
waitForServiceOrFail waits until a service is either present or absent in the cluster specified by clientset.
If the condition is not met within timeout, it fails the calling test.
*/
func waitForServiceOrFail(clientset *kubeclientset.Clientset, namespace string, service *v1.Service, present bool, timeout time.Duration) {
By(fmt.Sprintf("Fetching a federated service shard of service %q in namespace %q from cluster", service.Name, namespace))
var clusterService *v1.Service
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
var err error
clusterService, err = clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
if (!present) && errors.IsNotFound(err) { // We want it gone, and it's gone.
By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is absent", service.Name, namespace))
return true, nil // Success
}
if present && err == nil { // We want it present, and the Get succeeded, so we're all good.
By(fmt.Sprintf("Success: shard of federated service %q in namespace %q in cluster is present", service.Name, namespace))
return true, nil // Success
}
By(fmt.Sprintf("Service %q in namespace %q in cluster. Found: %v, waiting for Found: %v, trying again in %s (err=%v)", service.Name, namespace, clusterService != nil && err == nil, present, framework.Poll, err))
return false, nil
})
framework.ExpectNoError(err, "Failed to verify service %q in namespace %q in cluster: Present=%v", service.Name, namespace, present)
if present && clusterService != nil {
Expect(equivalent(*clusterService, *service)).To(BeTrue())
}
}
/*
waitForServiceShardsOrFail waits for the service to appear in all clusters
*/
func waitForServiceShardsOrFail(namespace string, service *v1.Service, clusters map[string]*cluster) {
framework.Logf("Waiting for service %q in %d clusters", service.Name, len(clusters))
for _, c := range clusters {
waitForServiceOrFail(c.Clientset, namespace, service, true, FederatedServiceTimeout)
}
}
func createService(clientset *fedclientset.Clientset, namespace, name string) (*v1.Service, error) {
if clientset == nil || len(namespace) == 0 {
return nil, fmt.Errorf("Internal error: invalid parameters passed to createService: clientset: %v, namespace: %v", clientset, namespace)
}
By(fmt.Sprintf("Creating federated service %q in namespace %q", name, namespace))
service := &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: namespace,
},
Spec: v1.ServiceSpec{
Selector: FederatedServiceLabels,
Type: "LoadBalancer",
Ports: []v1.ServicePort{
{
Name: "http",
Protocol: v1.ProtocolTCP,
Port: 80,
TargetPort: intstr.FromInt(8080),
},
},
SessionAffinity: v1.ServiceAffinityNone,
},
}
By(fmt.Sprintf("Trying to create service %q in namespace %q", service.Name, namespace))
return clientset.Services(namespace).Create(service)
}
func createServiceOrFail(clientset *fedclientset.Clientset, namespace, name string) *v1.Service {
service, err := createService(clientset, namespace, name)
framework.ExpectNoError(err, "Creating service %q in namespace %q", service.Name, namespace)
By(fmt.Sprintf("Successfully created federated service %q in namespace %q", name, namespace))
return service
}
func deleteServiceOrFail(clientset *fedclientset.Clientset, namespace string, serviceName string, orphanDependents *bool) {
if clientset == nil || len(namespace) == 0 || len(serviceName) == 0 {
Fail(fmt.Sprintf("Internal error: invalid parameters passed to deleteServiceOrFail: clientset: %v, namespace: %v, service: %v", clientset, namespace, serviceName))
}
err := clientset.Services(namespace).Delete(serviceName, &v1.DeleteOptions{OrphanDependents: orphanDependents})
framework.ExpectNoError(err, "Error deleting service %q from namespace %q", serviceName, namespace)
// Wait for the service to be deleted.
err = wait.Poll(5*time.Second, 3*wait.ForeverTestTimeout, func() (bool, error) {
_, err := clientset.Core().Services(namespace).Get(serviceName, metav1.GetOptions{})
if err != nil && errors.IsNotFound(err) {
return true, nil
}
return false, err
})
if err != nil {
framework.Failf("Error in deleting service %s: %v", serviceName, err)
}
}
func cleanupServiceShardsAndProviderResources(namespace string, service *v1.Service, clusters map[string]*cluster) {
framework.Logf("Deleting service %q in %d clusters", service.Name, len(clusters))
for name, c := range clusters {
var cSvc *v1.Service
err := wait.PollImmediate(framework.Poll, FederatedServiceTimeout, func() (bool, error) {
var err error
cSvc, err = c.Clientset.Services(namespace).Get(service.Name, metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
// Get failed with an error, try again.
framework.Logf("Failed to find service %q in namespace %q, in cluster %q: %v. Trying again in %s", service.Name, namespace, name, err, framework.Poll)
return false, nil
} else if errors.IsNotFound(err) {
cSvc = nil
By(fmt.Sprintf("Service %q in namespace %q in cluster %q not found", service.Name, namespace, name))
return true, err
}
By(fmt.Sprintf("Service %q in namespace %q in cluster %q found", service.Name, namespace, name))
return true, err
})
if err != nil || cSvc == nil {
By(fmt.Sprintf("Failed to find service %q in namespace %q, in cluster %q in %s", service.Name, namespace, name, FederatedServiceTimeout))
continue
}
err = cleanupServiceShard(c.Clientset, name, namespace, cSvc, FederatedServiceTimeout)
if err != nil {
framework.Logf("Failed to delete service %q in namespace %q, in cluster %q: %v", service.Name, namespace, name, err)
}
err = cleanupServiceShardLoadBalancer(name, cSvc, FederatedServiceTimeout)
if err != nil {
framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, namespace, name)
}
}
}
func cleanupServiceShard(clientset *kubeclientset.Clientset, clusterName, namespace string, service *v1.Service, timeout time.Duration) error {
err := wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
err := clientset.Services(namespace).Delete(service.Name, &v1.DeleteOptions{})
if err != nil && !errors.IsNotFound(err) {
// Deletion failed with an error, try again.
framework.Logf("Failed to delete service %q in namespace %q, in cluster %q", service.Name, namespace, clusterName)
return false, nil
}
By(fmt.Sprintf("Service %q in namespace %q in cluster %q deleted", service.Name, namespace, clusterName))
return true, nil
})
return err
}
func cleanupServiceShardLoadBalancer(clusterName string, service *v1.Service, timeout time.Duration) error {
provider := framework.TestContext.CloudConfig.Provider
if provider == nil {
return fmt.Errorf("cloud provider undefined")
}
internalSvc := &v1.Service{}
err := api.Scheme.Convert(service, internalSvc, nil)
if err != nil {
return fmt.Errorf("failed to convert versioned service object to internal type: %v", err)
}
err = wait.PollImmediate(framework.Poll, timeout, func() (bool, error) {
lbi, supported := provider.LoadBalancer()
if !supported {
return false, fmt.Errorf("%q doesn't support load balancers", provider.ProviderName())
}
err := lbi.EnsureLoadBalancerDeleted(clusterName, internalSvc)
if err != nil {
// Deletion failed with an error, try again.
framework.Logf("Failed to delete cloud provider resources for service %q in namespace %q, in cluster %q", service.Name, service.Namespace, clusterName)
return false, nil
}
By(fmt.Sprintf("Cloud provider resources for Service %q in namespace %q in cluster %q deleted", service.Name, service.Namespace, clusterName))
return true, nil
})
return err
}
func podExitCodeDetector(f *framework.Framework, name, namespace string, code int32) func() error {
// If we ever get any container logs, stash them here.
logs := ""
logerr := func(err error) error {
if err == nil {
return nil
}
if logs == "" {
return err
}
return fmt.Errorf("%s (%v)", logs, err)
}
return func() error {
pod, err := f.ClientSet.Core().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return logerr(err)
}
if len(pod.Status.ContainerStatuses) < 1 {
return logerr(fmt.Errorf("no container statuses"))
}
// Best effort attempt to grab pod logs for debugging
logs, err = framework.GetPodLogs(f.ClientSet, namespace, name, pod.Spec.Containers[0].Name)
if err != nil {
framework.Logf("Cannot fetch pod logs: %v", err)
}
status := pod.Status.ContainerStatuses[0]
if status.State.Terminated == nil {
return logerr(fmt.Errorf("container is not in terminated state"))
}
if status.State.Terminated.ExitCode == code {
return nil
}
return logerr(fmt.Errorf("exited %d", status.State.Terminated.ExitCode))
}
}
func discoverService(f *framework.Framework, name string, exists bool, podName string) {
command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)}
By(fmt.Sprintf("Looking up %q", name))
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "federated-service-discovery-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: command,
},
},
RestartPolicy: v1.RestartPolicyOnFailure,
},
}
nsName := f.FederationNamespace.Name
By(fmt.Sprintf("Creating pod %q in namespace %q", pod.Name, nsName))
_, err := f.ClientSet.Core().Pods(nsName).Create(pod)
framework.ExpectNoError(err, "Trying to create pod to run %q", command)
By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName))
defer func() {
By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName))
err := f.ClientSet.Core().Pods(nsName).Delete(podName, v1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName)
By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName))
}()
if exists {
// TODO(mml): Eventually check the IP address is correct, too.
Eventually(podExitCodeDetector(f, podName, nsName, 0), 3*DNSTTL, time.Second*2).
Should(BeNil(), "%q should exit 0, but it never did", command)
} else {
Eventually(podExitCodeDetector(f, podName, nsName, 0), 3*DNSTTL, time.Second*2).
ShouldNot(BeNil(), "%q should eventually not exit 0, but it always did", command)
}
}
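// Illustrative sketch (not part of the upstream file): a hypothetical caller
// checking both the positive and the negative DNS case. The DNS names below
// are assumptions following the federated service naming scheme.
func exampleDiscoverService(f *framework.Framework) {
	discoverService(f, "backend.test-ns.federation.svc.cluster.local", true, "service-discovery-pod")
	discoverService(f, "no-such-service.test-ns", false, "missing-service-discovery-pod")
}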
/*
createBackendPodsOrFail creates one pod in each cluster and records it as that cluster's backendPod.
If creation of any pod fails, the test fails (possibly with a partially created set of pods). No retries are attempted.
*/
func createBackendPodsOrFail(clusters map[string]*cluster, namespace string, name string) {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
// Namespace: namespace,
Labels: FederatedServiceLabels,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/echoserver:1.4",
},
},
RestartPolicy: v1.RestartPolicyAlways,
},
}
for name, c := range clusters {
By(fmt.Sprintf("Creating pod %q in namespace %q in cluster %q", pod.Name, namespace, name))
createdPod, err := c.Clientset.Core().Pods(namespace).Create(pod)
framework.ExpectNoError(err, "Creating pod %q in namespace %q in cluster %q", name, namespace, name)
By(fmt.Sprintf("Successfully created pod %q in namespace %q in cluster %q: %v", pod.Name, namespace, name, *createdPod))
c.backendPod = createdPod
}
}
/*
deleteOneBackendPodOrFail deletes exactly one backend pod which must not be nil
The test fails if there are any errors.
*/
func deleteOneBackendPodOrFail(c *cluster) {
pod := c.backendPod
Expect(pod).ToNot(BeNil())
err := c.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, v1.NewDeleteOptions(0))
if errors.IsNotFound(err) {
By(fmt.Sprintf("Pod %q in namespace %q in cluster %q does not exist. No need to delete it.", pod.Name, pod.Namespace, c.name))
} else {
framework.ExpectNoError(err, "Deleting pod %q in namespace %q from cluster %q", pod.Name, pod.Namespace, c.name)
}
By(fmt.Sprintf("Backend pod %q in namespace %q in cluster %q deleted or does not exist", pod.Name, pod.Namespace, c.name))
}
/*
deleteBackendPodsOrFail deletes one pod from each cluster that has one.
If deletion of any pod fails, the test fails (possibly with a partially deleted set of pods). No retries are attempted.
*/
func deleteBackendPodsOrFail(clusters map[string]*cluster, namespace string) {
for name, c := range clusters {
if c.backendPod != nil {
deleteOneBackendPodOrFail(c)
c.backendPod = nil
} else {
By(fmt.Sprintf("No backend pod to delete for cluster %q", name))
}
}
}
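// Illustrative sketch (not part of the upstream file): the usual backend pod
// lifecycle around a federated service test. The pod name is hypothetical.
func exampleBackendPodLifecycle(clusters map[string]*cluster, nsName string) {
	createBackendPodsOrFail(clusters, nsName, "federated-service-backend")
	defer deleteBackendPodsOrFail(clusters, nsName)
	// ... exercise the federated service against the per-cluster backends ...
}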

152
vendor/k8s.io/kubernetes/test/e2e/firewall.go generated vendored Normal file
View file

@ -0,0 +1,152 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Firewall rule", func() {
var firewallTestName = "firewall-test"
f := framework.NewDefaultFramework(firewallTestName)
var cs clientset.Interface
var cloudConfig framework.CloudConfig
var gceCloud *gcecloud.GCECloud
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
cs = f.ClientSet
cloudConfig = framework.TestContext.CloudConfig
gceCloud = cloudConfig.Provider.(*gcecloud.GCECloud)
})
// This test takes around 4 minutes to run
It("[Slow] [Serial] should create valid firewall rules for LoadBalancer type service", func() {
ns := f.Namespace.Name
// These source ranges are only used to verify that the LB firewall rules contain exactly the same entries
firewallTestSourceRanges := []string{"0.0.0.0/1", "128.0.0.0/1"}
serviceName := "firewall-test-loadbalancer"
jig := framework.NewServiceTestJig(cs, serviceName)
nodesNames := jig.GetNodesNames(framework.MaxNodesForEndpointsTests)
if len(nodesNames) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", nodesNames)
}
nodesSet := sets.NewString(nodesNames...)
// An OnlyLocal service is needed to determine exactly which nodes the GCE load balancer forwards requests to
By("Creating a LoadBalancer type service with onlyLocal annotation")
svc := jig.CreateOnlyLocalLoadBalancerService(ns, serviceName,
framework.LoadBalancerCreateTimeoutDefault, false, func(svc *v1.Service) {
svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: framework.FirewallTestHttpPort}}
svc.Spec.LoadBalancerSourceRanges = firewallTestSourceRanges
})
defer func() {
jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.Spec.Type = v1.ServiceTypeNodePort
svc.Spec.LoadBalancerSourceRanges = nil
})
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}()
svcExternalIP := svc.Status.LoadBalancer.Ingress[0].IP
By("Checking if service's firewall rules are correct")
nodeTags := framework.GetInstanceTags(cloudConfig, nodesNames[0])
expFw := framework.ConstructFirewallForLBService(svc, nodeTags.Items)
fw, err := gceCloud.GetFirewall(expFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
By(fmt.Sprintf("Creating netexec pods on at most %v nodes", framework.MaxNodesForEndpointsTests))
for i, nodeName := range nodesNames {
podName := fmt.Sprintf("netexec%v", i)
jig.LaunchNetexecPodOnNode(f, nodeName, podName, framework.FirewallTestHttpPort, framework.FirewallTestUdpPort, true)
defer func() {
framework.Logf("Cleaning up the netexec pod: %v", podName)
Expect(cs.Core().Pods(ns).Delete(podName, nil)).NotTo(HaveOccurred())
}()
}
// Send requests from outside of the cluster because internal traffic is whitelisted
By("Accessing the external service ip from outside, all non-master nodes should be reached")
Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
// by removing the tag on one vm and make sure it doesn't get any traffic. This is an imperfect
// simulation, we really want to check that traffic doesn't reach a vm outside the GKE cluster, but
// that's much harder to do in the current e2e framework.
By("Removing tags from one of the nodes")
nodesSet.Delete(nodesNames[0])
removedTags := framework.SetInstanceTags(cloudConfig, nodesNames[0], []string{})
defer func() {
By("Adding tags back to the node and wait till the traffic is recovered")
nodesSet.Insert(nodesNames[0])
framework.SetInstanceTags(cloudConfig, nodesNames[0], removedTags)
// Make sure traffic is recovered before exit
Expect(framework.TestHitNodesFromOutside(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet)).NotTo(HaveOccurred())
}()
By("Accessing serivce through the external ip and examine got no response from the node without tags")
Expect(framework.TestHitNodesFromOutsideWithCount(svcExternalIP, framework.FirewallTestHttpPort, framework.FirewallTimeoutDefault, nodesSet, 15)).NotTo(HaveOccurred())
})
It("should have correct firewall rules for e2e cluster", func() {
By("Gathering firewall related information")
masterTags := framework.GetInstanceTags(cloudConfig, cloudConfig.MasterName)
Expect(len(masterTags.Items)).Should(Equal(1))
nodes := framework.GetReadySchedulableNodesOrDie(cs)
if len(nodes.Items) <= 0 {
framework.Failf("Expect at least 1 node, got: %v", len(nodes.Items))
}
nodeTags := framework.GetInstanceTags(cloudConfig, nodes.Items[0].Name)
Expect(len(nodeTags.Items)).Should(Equal(1))
By("Checking if e2e firewall rules are correct")
for _, expFw := range framework.GetE2eFirewalls(cloudConfig.MasterName, masterTags.Items[0], nodeTags.Items[0], cloudConfig.Network) {
fw, err := gceCloud.GetFirewall(expFw.Name)
Expect(err).NotTo(HaveOccurred())
Expect(framework.VerifyFirewallRule(fw, expFw, cloudConfig.Network, false)).NotTo(HaveOccurred())
}
By("Checking well known ports on master and nodes are not exposed externally")
nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
Expect(len(nodeAddrs)).NotTo(BeZero())
masterAddr := framework.GetMasterAddress(cs)
flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.ControllerManagerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, framework.FirewallTestTcpTimeout)
Expect(flag).To(BeTrue())
})
})

130
vendor/k8s.io/kubernetes/test/e2e/framework/BUILD generated vendored Normal file
View file

@ -0,0 +1,130 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"authorizer_util.go",
"cleanup.go",
"exec_util.go",
"federation_util.go",
"firewall_util.go",
"framework.go",
"get-kubemark-resource-usage.go",
"google_compute.go",
"kubelet_stats.go",
"log_size_monitoring.go",
"metrics_util.go",
"networking_utils.go",
"nodes_util.go",
"perf_util.go",
"pods.go",
"resource_usage_gatherer.go",
"service_util.go",
"test_context.go",
"util.go",
],
tags = ["automanaged"],
deps = [
"//federation/apis/federation/v1beta1:go_default_library",
"//federation/client/clientset_generated/federation_clientset:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/api/v1/service:go_default_library",
"//pkg/api/validation:go_default_library",
"//pkg/apis/apps/v1beta1:go_default_library",
"//pkg/apis/authorization/v1beta1:go_default_library",
"//pkg/apis/batch:go_default_library",
"//pkg/apis/batch/v1:go_default_library",
"//pkg/apis/componentconfig:go_default_library",
"//pkg/apis/extensions:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/apis/rbac/v1alpha1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/authorization/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
"//pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1:go_default_library",
"//pkg/client/clientset_generated/internalclientset:go_default_library",
"//pkg/client/conditions:go_default_library",
"//pkg/client/restclient:go_default_library",
"//pkg/client/typed/discovery:go_default_library",
"//pkg/client/typed/dynamic:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/client/unversioned/remotecommand:go_default_library",
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/deployment/util:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubelet/api/v1alpha1/stats:go_default_library",
"//pkg/kubelet/metrics:go_default_library",
"//pkg/kubelet/server/remotecommand:go_default_library",
"//pkg/kubelet/server/stats:go_default_library",
"//pkg/kubelet/util/format:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/metrics:go_default_library",
"//pkg/ssh:go_default_library",
"//pkg/util/exec:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/labels:go_default_library",
"//pkg/util/system:go_default_library",
"//pkg/util/uuid:go_default_library",
"//pkg/util/version:go_default_library",
"//plugin/pkg/scheduler/algorithm/predicates:go_default_library",
"//plugin/pkg/scheduler/schedulercache:go_default_library",
"//test/e2e/perftype:go_default_library",
"//test/utils:go_default_library",
"//vendor:github.com/golang/glog",
"//vendor:github.com/google/cadvisor/info/v1",
"//vendor:github.com/onsi/ginkgo",
"//vendor:github.com/onsi/ginkgo/config",
"//vendor:github.com/onsi/gomega",
"//vendor:github.com/onsi/gomega/types",
"//vendor:github.com/prometheus/common/expfmt",
"//vendor:github.com/prometheus/common/model",
"//vendor:github.com/spf13/viper",
"//vendor:golang.org/x/crypto/ssh",
"//vendor:golang.org/x/net/websocket",
"//vendor:google.golang.org/api/compute/v1",
"//vendor:google.golang.org/api/googleapi",
"//vendor:gopkg.in/yaml.v2",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1/unstructured",
"//vendor:k8s.io/apimachinery/pkg/labels",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",
"//vendor:k8s.io/apimachinery/pkg/types",
"//vendor:k8s.io/apimachinery/pkg/util/errors",
"//vendor:k8s.io/apimachinery/pkg/util/net",
"//vendor:k8s.io/apimachinery/pkg/util/rand",
"//vendor:k8s.io/apimachinery/pkg/util/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/sets",
"//vendor:k8s.io/apimachinery/pkg/util/validation",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/apimachinery/pkg/watch",
"//vendor:k8s.io/client-go/kubernetes",
"//vendor:k8s.io/client-go/rest",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

112
vendor/k8s.io/kubernetes/test/e2e/framework/authorizer_util.go generated vendored Normal file
View file

@ -0,0 +1,112 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"time"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
legacyv1 "k8s.io/kubernetes/pkg/api/v1"
authorizationv1beta1 "k8s.io/kubernetes/pkg/apis/authorization/v1beta1"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
v1beta1authorization "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/authorization/v1beta1"
v1alpha1rbac "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/rbac/v1alpha1"
)
const (
policyCachePollInterval = 100 * time.Millisecond
policyCachePollTimeout = 5 * time.Second
)
// WaitForAuthorizationUpdate checks if the given user can perform the named verb and action.
// If policyCachePollTimeout is reached without the expected condition matching, an error is returned
func WaitForAuthorizationUpdate(c v1beta1authorization.SubjectAccessReviewsGetter, user, namespace, verb string, resource schema.GroupResource, allowed bool) error {
review := &authorizationv1beta1.SubjectAccessReview{
Spec: authorizationv1beta1.SubjectAccessReviewSpec{
ResourceAttributes: &authorizationv1beta1.ResourceAttributes{
Group: resource.Group,
Verb: verb,
Resource: resource.Resource,
Namespace: namespace,
},
User: user,
},
}
err := wait.Poll(policyCachePollInterval, policyCachePollTimeout, func() (bool, error) {
response, err := c.SubjectAccessReviews().Create(review)
// GKE doesn't enable the SAR endpoint. Without this endpoint, we cannot determine if the policy engine
// has adjusted as expected. In this case, simply wait one second and hope it's up to date
if apierrors.IsNotFound(err) {
fmt.Printf("SubjectAccessReview endpoint is missing\n")
time.Sleep(1 * time.Second)
return true, nil
}
if err != nil {
return false, err
}
if response.Status.Allowed != allowed {
return false, nil
}
return true, nil
})
return err
}
// BindClusterRole binds the cluster role at the cluster scope
func BindClusterRole(c v1alpha1rbac.ClusterRoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1alpha1.Subject) {
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.ClusterRoleBindings().Create(&rbacv1alpha1.ClusterRoleBinding{
ObjectMeta: legacyv1.ObjectMeta{
Name: ns + "--" + clusterRole,
},
RoleRef: rbacv1alpha1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: clusterRole,
},
Subjects: subjects,
})
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding clusterrole/%s for %q for %v\n", clusterRole, ns, subjects)
}
}
// BindClusterRoleInNamespace binds the cluster role at the namespace scope
func BindClusterRoleInNamespace(c v1alpha1rbac.RoleBindingsGetter, clusterRole, ns string, subjects ...rbacv1alpha1.Subject) {
// Since the namespace names are unique, we can leave this lying around so we don't have to race any caches
_, err := c.RoleBindings(ns).Create(&rbacv1alpha1.RoleBinding{
ObjectMeta: legacyv1.ObjectMeta{
Name: ns + "--" + clusterRole,
},
RoleRef: rbacv1alpha1.RoleRef{
APIGroup: "rbac.authorization.k8s.io",
Kind: "ClusterRole",
Name: clusterRole,
},
Subjects: subjects,
})
// if we failed, don't fail the entire test because it may still work. RBAC may simply be disabled.
if err != nil {
fmt.Printf("Error binding clusterrole/%s into %q for %v\n", clusterRole, ns, subjects)
}
}
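// Illustrative sketch (not part of the upstream file): granting a namespace's
// default service account the "view" role and waiting for the authorizer
// cache to observe it before exercising the permission. The getters, subject,
// user string and resource below are assumptions for the example.
func exampleWaitForViewPermission(rbacClient v1alpha1rbac.ClusterRoleBindingsGetter, sarClient v1beta1authorization.SubjectAccessReviewsGetter, ns string) error {
	subject := rbacv1alpha1.Subject{Kind: "ServiceAccount", Namespace: ns, Name: "default"}
	BindClusterRole(rbacClient, "view", ns, subject)
	// Block until a SubjectAccessReview for the bound user reports allowed=true.
	return WaitForAuthorizationUpdate(sarClient,
		"system:serviceaccount:"+ns+":default",
		ns, "get", schema.GroupResource{Resource: "pods"}, true)
}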

61
vendor/k8s.io/kubernetes/test/e2e/framework/cleanup.go generated vendored Normal file
View file

@ -0,0 +1,61 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import "sync"
type CleanupActionHandle *int
var cleanupActionsLock sync.Mutex
var cleanupActions = map[CleanupActionHandle]func(){}
// AddCleanupAction installs a function that will be called in the event of the
// whole test being terminated. This allows arbitrary pieces of the overall
// test to hook into SynchronizedAfterSuite().
func AddCleanupAction(fn func()) CleanupActionHandle {
p := CleanupActionHandle(new(int))
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
cleanupActions[p] = fn
return p
}
// RemoveCleanupAction removes a function that was installed by
// AddCleanupAction.
func RemoveCleanupAction(p CleanupActionHandle) {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
delete(cleanupActions, p)
}
// RunCleanupActions runs all functions installed by AddCleanupAction. It does
// not remove them (see RemoveCleanupAction) but it does run unlocked, so they
// may remove themselves.
func RunCleanupActions() {
list := []func(){}
func() {
cleanupActionsLock.Lock()
defer cleanupActionsLock.Unlock()
for _, fn := range cleanupActions {
list = append(list, fn)
}
}()
// Run unlocked.
for _, fn := range list {
fn()
}
}
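// Illustrative sketch (not part of the upstream file): a test registers a
// cleanup action so its resources are reclaimed even if the suite terminates
// early, and deregisters it once it has cleaned up normally. The teardown
// body is a stand-in for real cleanup logic.
func exampleCleanupRegistration(teardown func()) {
	handle := AddCleanupAction(teardown)
	defer RemoveCleanupAction(handle)
	// ... test body; teardown runs via RunCleanupActions if the suite is torn down early ...
}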

149
vendor/k8s.io/kubernetes/test/e2e/framework/exec_util.go generated vendored Normal file
View file

@ -0,0 +1,149 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"io"
"net/url"
"strings"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
. "github.com/onsi/gomega"
)
// ExecOptions passed to ExecWithOptions
type ExecOptions struct {
Command []string
Namespace string
PodName string
ContainerName string
Stdin io.Reader
CaptureStdout bool
CaptureStderr bool
// If false, leading and trailing whitespace in stdout/stderr will be trimmed.
PreserveWhitespace bool
}
// ExecWithOptions executes a command in the specified container,
// returning stdout, stderr and error. `options` allows additional
// parameters to be passed.
func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error) {
Logf("ExecWithOptions %+v", options)
config, err := LoadConfig()
Expect(err).NotTo(HaveOccurred(), "failed to load restclient config")
const tty = false
req := f.ClientSet.Core().RESTClient().Post().
Resource("pods").
Name(options.PodName).
Namespace(options.Namespace).
SubResource("exec").
Param("container", options.ContainerName)
req.VersionedParams(&v1.PodExecOptions{
Container: options.ContainerName,
Command: options.Command,
Stdin: options.Stdin != nil,
Stdout: options.CaptureStdout,
Stderr: options.CaptureStderr,
TTY: tty,
}, api.ParameterCodec)
var stdout, stderr bytes.Buffer
err = execute("POST", req.URL(), config, options.Stdin, &stdout, &stderr, tty)
if options.PreserveWhitespace {
return stdout.String(), stderr.String(), err
}
return strings.TrimSpace(stdout.String()), strings.TrimSpace(stderr.String()), err
}
// ExecCommandInContainerWithFullOutput executes a command in the
// specified container and return stdout, stderr and error
func (f *Framework) ExecCommandInContainerWithFullOutput(podName, containerName string, cmd ...string) (string, string, error) {
return f.ExecWithOptions(ExecOptions{
Command: cmd,
Namespace: f.Namespace.Name,
PodName: podName,
ContainerName: containerName,
Stdin: nil,
CaptureStdout: true,
CaptureStderr: true,
PreserveWhitespace: false,
})
}
// ExecCommandInContainer executes a command in the specified container.
func (f *Framework) ExecCommandInContainer(podName, containerName string, cmd ...string) string {
stdout, stderr, err := f.ExecCommandInContainerWithFullOutput(podName, containerName, cmd...)
Logf("Exec stderr: %q", stderr)
Expect(err).NotTo(HaveOccurred(),
"failed to execute command in pod %v, container %v: %v",
podName, containerName, err)
return stdout
}
func (f *Framework) ExecShellInContainer(podName, containerName string, cmd string) string {
return f.ExecCommandInContainer(podName, containerName, "/bin/sh", "-c", cmd)
}
func (f *Framework) ExecCommandInPod(podName string, cmd ...string) string {
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get pod")
Expect(pod.Spec.Containers).NotTo(BeEmpty())
return f.ExecCommandInContainer(podName, pod.Spec.Containers[0].Name, cmd...)
}
func (f *Framework) ExecCommandInPodWithFullOutput(podName string, cmd ...string) (string, string, error) {
pod, err := f.PodClient().Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), "failed to get pod")
Expect(pod.Spec.Containers).NotTo(BeEmpty())
return f.ExecCommandInContainerWithFullOutput(podName, pod.Spec.Containers[0].Name, cmd...)
}
func (f *Framework) ExecShellInPod(podName string, cmd string) string {
return f.ExecCommandInPod(podName, "/bin/sh", "-c", cmd)
}
func (f *Framework) ExecShellInPodWithFullOutput(podName string, cmd string) (string, string, error) {
return f.ExecCommandInPodWithFullOutput(podName, "/bin/sh", "-c", cmd)
}
func execute(method string, url *url.URL, config *restclient.Config, stdin io.Reader, stdout, stderr io.Writer, tty bool) error {
exec, err := remotecommand.NewExecutor(config, method, url)
if err != nil {
return err
}
return exec.Stream(remotecommand.StreamOptions{
SupportedProtocols: remotecommandserver.SupportedStreamingProtocols,
Stdin: stdin,
Stdout: stdout,
Stderr: stderr,
Tty: tty,
})
}
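// Illustrative sketch (not part of the upstream file): the wrappers above all
// funnel into ExecWithOptions. The pod name and commands are hypothetical.
func exampleExecHelpers(f *Framework) {
	// Simple case: fail the test on error and return stdout only.
	hostname := f.ExecShellInPod("busybox-pod", "cat /etc/hostname")
	Logf("hostname: %s", hostname)
	// Full-output case: inspect stderr and the error yourself.
	stdout, stderr, err := f.ExecShellInPodWithFullOutput("busybox-pod", "ls /nonexistent")
	Logf("stdout=%q stderr=%q err=%v", stdout, stderr, err)
}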

42
vendor/k8s.io/kubernetes/test/e2e/framework/federation_util.go generated vendored Normal file
View file

@ -0,0 +1,42 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"regexp"
validationutil "k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/api/validation"
)
// GetValidDNSSubdomainName massages the given name into a valid DNS subdomain name.
// Most resources (such as secrets, clusters) require names to be valid DNS subdomains.
// This is a generic function (not specific to federation). Should be moved to a more generic location if others want to use it.
func GetValidDNSSubdomainName(name string) (string, error) {
// "_" are not allowed. Replace them by "-".
name = regexp.MustCompile("_").ReplaceAllLiteralString(name, "-")
maxLength := validationutil.DNS1123SubdomainMaxLength
if len(name) > maxLength {
name = name[:maxLength]
}
// Verify that name now passes the validation.
if errors := validation.NameIsDNSSubdomain(name, false); len(errors) != 0 {
return "", fmt.Errorf("errors in converting name to a valid DNS subdomain %s", errors)
}
return name, nil
}
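// Illustrative sketch (not part of the upstream file): underscores are the
// common offender in generated names; a context name like "federation_e2e"
// (hypothetical) becomes the valid subdomain "federation-e2e".
func exampleValidDNSName() {
	name, err := GetValidDNSSubdomainName("federation_e2e")
	if err != nil {
		Failf("unexpected conversion error: %v", err)
	}
	Logf("converted name: %s", name) // "federation-e2e"
}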

328
vendor/k8s.io/kubernetes/test/e2e/framework/firewall_util.go generated vendored Normal file
View file

@ -0,0 +1,328 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
. "github.com/onsi/gomega"
compute "google.golang.org/api/compute/v1"
)
const (
FirewallTimeoutDefault = 3 * time.Minute
FirewallTestTcpTimeout = time.Duration(1 * time.Second)
// Set ports outside of 30000-32767, 80 and 8080 to avoid being whitelisted by the e2e cluster
FirewallTestHttpPort = int32(29999)
FirewallTestUdpPort = int32(29998)
)
// MakeFirewallNameForLBService returns the expected firewall name for a LB service.
// This should match the formatting of makeFirewallName() in pkg/cloudprovider/providers/gce/gce.go
func MakeFirewallNameForLBService(name string) string {
return fmt.Sprintf("k8s-fw-%s", name)
}
// ConstructFirewallForLBService returns the expected GCE firewall rule for a loadbalancer type service
func ConstructFirewallForLBService(svc *v1.Service, nodesTags []string) *compute.Firewall {
if svc.Spec.Type != v1.ServiceTypeLoadBalancer {
Failf("can not construct firewall rule for non-loadbalancer type service")
}
fw := compute.Firewall{}
fw.Name = MakeFirewallNameForLBService(cloudprovider.GetLoadBalancerName(svc))
fw.TargetTags = nodesTags
if svc.Spec.LoadBalancerSourceRanges == nil {
fw.SourceRanges = []string{"0.0.0.0/0"}
} else {
fw.SourceRanges = svc.Spec.LoadBalancerSourceRanges
}
for _, sp := range svc.Spec.Ports {
fw.Allowed = append(fw.Allowed, &compute.FirewallAllowed{
IPProtocol: strings.ToLower(string(sp.Protocol)),
Ports: []string{strconv.Itoa(int(sp.Port))},
})
}
return &fw
}
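// Illustrative sketch (not part of the upstream file): building the expected
// firewall for a LoadBalancer service and checking it against GCE, assuming
// VerifyFirewallRule from this package (its use is shown in firewall.go above).
// All arguments are supplied by the hypothetical caller.
func exampleVerifyLBFirewall(gceCloud *gcecloud.GCECloud, svc *v1.Service, nodeTags []string, network string) error {
	expFw := ConstructFirewallForLBService(svc, nodeTags)
	fw, err := gceCloud.GetFirewall(expFw.Name)
	if err != nil {
		return err
	}
	return VerifyFirewallRule(fw, expFw, network, false)
}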
// GetNodeTags gets tags from one of the Kubernetes nodes
func GetNodeTags(c clientset.Interface, cloudConfig CloudConfig) *compute.Tags {
nodes := GetReadySchedulableNodesOrDie(c)
Expect(len(nodes.Items) > 0).Should(BeTrue())
nodeTags := GetInstanceTags(cloudConfig, nodes.Items[0].Name)
return nodeTags
}
// GetInstanceTags gets tags from GCE instance with given name.
func GetInstanceTags(cloudConfig CloudConfig, instanceName string) *compute.Tags {
gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
res, err := gceCloud.GetComputeService().Instances.Get(cloudConfig.ProjectID, cloudConfig.Zone,
instanceName).Do()
if err != nil {
Failf("Failed to get instance tags for %v: %v", instanceName, err)
}
return res.Tags
}
// SetInstanceTags sets tags on GCE instance with given name.
func SetInstanceTags(cloudConfig CloudConfig, instanceName string, tags []string) []string {
gceCloud := cloudConfig.Provider.(*gcecloud.GCECloud)
// Re-get the instance every time because we need the latest fingerprint for updating metadata
resTags := GetInstanceTags(cloudConfig, instanceName)
_, err := gceCloud.GetComputeService().Instances.SetTags(
cloudConfig.ProjectID, cloudConfig.Zone, instanceName,
&compute.Tags{Fingerprint: resTags.Fingerprint, Items: tags}).Do()
if err != nil {
Failf("failed to set instance tags: %v", err)
}
Logf("Sent request to set tags %v on instance: %v", tags, instanceName)
return resTags.Items
}
// GetInstancePrefix returns the INSTANCE_PREFIX env we set for the e2e cluster.
// From cluster/gce/config-test.sh, the master name is set up using the format below:
// MASTER_NAME="${INSTANCE_PREFIX}-master"
func GetInstancePrefix(masterName string) (string, error) {
if !strings.HasSuffix(masterName, "-master") {
return "", fmt.Errorf("unexpected master name format: %v", masterName)
}
return masterName[:len(masterName)-7], nil
}
// GetClusterName returns the CLUSTER_NAME env we set for the e2e cluster.
// From cluster/gce/config-test.sh, the cluster name is set up using the format below:
// CLUSTER_NAME="${CLUSTER_NAME:-${INSTANCE_PREFIX}}"
func GetClusterName(instancePrefix string) string {
return instancePrefix
}
// GetClusterIpRange returns the CLUSTER_IP_RANGE env we set for the e2e cluster.
// From cluster/gce/config-test.sh, the cluster IP range is set up using the command below:
// CLUSTER_IP_RANGE="${CLUSTER_IP_RANGE:-10.180.0.0/14}"
// Warning: this needs to be consistent with the CLUSTER_IP_RANGE in the startup scripts,
// which is currently hardcoded.
func GetClusterIpRange() string {
return "10.180.0.0/14"
}
// GetE2eFirewalls returns all firewall rules we create for an e2e cluster.
// From cluster/gce/util.sh, all firewall rules should be consistent with the ones created by startup scripts.
func GetE2eFirewalls(masterName, masterTag, nodeTag, network string) []*compute.Firewall {
instancePrefix, err := GetInstancePrefix(masterName)
Expect(err).NotTo(HaveOccurred())
clusterName := GetClusterName(instancePrefix)
clusterIpRange := GetClusterIpRange()
fws := []*compute.Firewall{}
fws = append(fws, &compute.Firewall{
Name: clusterName + "-default-internal-master",
SourceRanges: []string{"10.0.0.0/8"},
TargetTags: []string{masterTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"1-2379"},
},
{
IPProtocol: "tcp",
Ports: []string{"2382-65535"},
},
{
IPProtocol: "udp",
Ports: []string{"1-65535"},
},
{
IPProtocol: "icmp",
},
},
})
fws = append(fws, &compute.Firewall{
Name: clusterName + "-default-internal-node",
SourceRanges: []string{"10.0.0.0/8"},
TargetTags: []string{nodeTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"1-65535"},
},
{
IPProtocol: "udp",
Ports: []string{"1-65535"},
},
{
IPProtocol: "icmp",
},
},
})
fws = append(fws, &compute.Firewall{
Name: network + "-default-ssh",
SourceRanges: []string{"0.0.0.0/0"},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"22"},
},
},
})
fws = append(fws, &compute.Firewall{
Name: masterName + "-etcd",
SourceTags: []string{masterTag},
TargetTags: []string{masterTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"2380"},
},
{
IPProtocol: "tcp",
Ports: []string{"2381"},
},
},
})
fws = append(fws, &compute.Firewall{
Name: masterName + "-https",
SourceRanges: []string{"0.0.0.0/0"},
TargetTags: []string{masterTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"443"},
},
},
})
fws = append(fws, &compute.Firewall{
Name: nodeTag + "-all",
SourceRanges: []string{clusterIpRange},
TargetTags: []string{nodeTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
},
{
IPProtocol: "udp",
},
{
IPProtocol: "icmp",
},
{
IPProtocol: "esp",
},
{
IPProtocol: "ah",
},
{
IPProtocol: "sctp",
},
},
})
fws = append(fws, &compute.Firewall{
Name: nodeTag + "-" + instancePrefix + "-http-alt",
SourceRanges: []string{"0.0.0.0/0"},
TargetTags: []string{nodeTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"80"},
},
{
IPProtocol: "tcp",
Ports: []string{"8080"},
},
},
})
fws = append(fws, &compute.Firewall{
Name: nodeTag + "-" + instancePrefix + "-nodeports",
SourceRanges: []string{"0.0.0.0/0"},
TargetTags: []string{nodeTag},
Allowed: []*compute.FirewallAllowed{
{
IPProtocol: "tcp",
Ports: []string{"30000-32767"},
},
{
IPProtocol: "udp",
Ports: []string{"30000-32767"},
},
},
})
return fws
}
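// Example (sketch): for masterName "e2e-test-master" the instance prefix is "e2e-test",
// so the rules above come out as "e2e-test-default-internal-master",
// "e2e-test-default-internal-node", "<network>-default-ssh", "e2e-test-master-etcd",
// "e2e-test-master-https", "<nodeTag>-all", "<nodeTag>-e2e-test-http-alt" and
// "<nodeTag>-e2e-test-nodeports".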
// PackProtocolsPortsFromFirewall packs protocols and ports in a unified way for verification.
func PackProtocolsPortsFromFirewall(alloweds []*compute.FirewallAllowed) []string {
protocolPorts := []string{}
for _, allowed := range alloweds {
for _, port := range allowed.Ports {
protocolPorts = append(protocolPorts, strings.ToLower(allowed.IPProtocol+"/"+port))
}
}
return protocolPorts
}
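// Example (sketch): an allowed entry {IPProtocol: "TCP", Ports: ["80", "8080"]} packs to
// ["tcp/80", "tcp/8080"]. Entries without ports (e.g. plain "icmp") contribute nothing,
// since only the Ports slice is iterated.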
// SameStringArray verifies whether two string arrays have the same strings; it returns an error if not.
// Order does not matter.
// When `include` is set to true, verifies whether result includes all elements from expected.
func SameStringArray(result, expected []string, include bool) error {
res := sets.NewString(result...)
exp := sets.NewString(expected...)
if !include {
diff := res.Difference(exp)
if len(diff) != 0 {
return fmt.Errorf("found differences: %v", diff)
}
} else {
if !res.IsSuperset(exp) {
return fmt.Errorf("some elements are missing: expected %v, got %v", expected, result)
}
}
return nil
}
// VerifyFirewallRule verifies whether the result firewall is consistent with the expected firewall.
// When `portsSubset` is false, match given ports exactly. Otherwise, only check ports are included.
func VerifyFirewallRule(res, exp *compute.Firewall, network string, portsSubset bool) error {
if res.Name != exp.Name {
return fmt.Errorf("incorrect name: %v, expected %v", res.Name, exp.Name)
}
// Sample Network value: https://www.googleapis.com/compute/v1/projects/{project-id}/global/networks/e2e
if !strings.HasSuffix(res.Network, "/"+network) {
return fmt.Errorf("incorrect network: %v, expected ends with: %v", res.Network, "/"+network)
}
if err := SameStringArray(PackProtocolsPortsFromFirewall(res.Allowed),
PackProtocolsPortsFromFirewall(exp.Allowed), portsSubset); err != nil {
return fmt.Errorf("incorrect allowed protocols ports: %v", err)
}
if err := SameStringArray(res.SourceRanges, exp.SourceRanges, false); err != nil {
return fmt.Errorf("incorrect source ranges %v, expected %v: %v", res.SourceRanges, exp.SourceRanges, err)
}
if err := SameStringArray(res.SourceTags, exp.SourceTags, false); err != nil {
return fmt.Errorf("incorrect source tags %v, expected %v: %v", res.SourceTags, exp.SourceTags, err)
}
if err := SameStringArray(res.TargetTags, exp.TargetTags, false); err != nil {
return fmt.Errorf("incorrect target tags %v, expected %v: %v", res.TargetTags, exp.TargetTags, err)
}
return nil
}
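// Example (a minimal sketch of tying the helpers above together; the GetFirewall
// lookup on the GCE cloud provider is an assumption, not part of this file):
//
//   for _, expFw := range GetE2eFirewalls(masterName, masterTag, nodeTag, network) {
//       resFw, err := gceCloud.GetFirewall(expFw.Name)
//       if err != nil {
//           Failf("failed to fetch firewall %q: %v", expFw.Name, err)
//       }
//       if err := VerifyFirewallRule(resFw, expFw, network, false); err != nil {
//           Failf("firewall %q mismatch: %v", expFw.Name, err)
//       }
//   }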

1002
vendor/k8s.io/kubernetes/test/e2e/framework/framework.go generated vendored Normal file

File diff suppressed because it is too large

View file

@ -0,0 +1,52 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bufio"
"fmt"
"strings"
)
type KubemarkResourceUsage struct {
Name string
MemoryWorkingSetInBytes uint64
CPUUsageInCores float64
}
// TODO: figure out how to move this to kubemark directory (need to factor test SSH out of e2e framework)
func GetKubemarkMasterComponentsResourceUsage() map[string]*KubemarkResourceUsage {
result := make(map[string]*KubemarkResourceUsage)
sshResult, err := SSH("ps ax -o %cpu,rss,command | tail -n +2 | grep kube | sed 's/\\s+/ /g'", GetMasterHost()+":22", TestContext.Provider)
if err != nil {
Logf("Error when trying to SSH to master machine. Skipping probe")
return nil
}
scanner := bufio.NewScanner(strings.NewReader(sshResult.Stdout))
for scanner.Scan() {
var cpu float64
var mem uint64
var name string
fmt.Sscanf(strings.TrimSpace(scanner.Text()), "%f %d /usr/local/bin/kube-%s", &cpu, &mem, &name)
if name != "" {
// Gatherer expects pod_name/container_name format
fullName := name + "/" + name
result[fullName] = &KubemarkResourceUsage{Name: fullName, MemoryWorkingSetInBytes: mem * 1024, CPUUsageInCores: cpu / 100}
}
}
return result
}
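// Example (sketch): a ps line such as "2.5 102400 /usr/local/bin/kube-apiserver --v=2"
// scans to cpu=2.5, mem=102400 and name="apiserver" (Sscanf's %s stops at the first
// space), yielding result["apiserver/apiserver"] with CPUUsageInCores=0.025 and
// MemoryWorkingSetInBytes=102400*1024.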

View file

@ -0,0 +1,98 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"os/exec"
"regexp"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
)
// TODO: These should really just use the GCE API client library or at least use
// better formatted output from the --format flag.
func CreateGCEStaticIP(name string) (string, error) {
// gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// abshah@abhidesk:~/go/src/code.google.com/p/google-api-go-client/compute/v1$ gcloud compute --project "abshah-kubernetes-001" addresses create "test-static-ip" --region "us-central1"
// Created [https://www.googleapis.com/compute/v1/projects/abshah-kubernetes-001/regions/us-central1/addresses/test-static-ip].
// NAME REGION ADDRESS STATUS
// test-static-ip us-central1 104.197.143.7 RESERVED
var outputBytes []byte
var err error
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return "", fmt.Errorf("failed to convert zone to region: %v", err)
}
glog.Infof("Creating static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
for attempts := 0; attempts < 4; attempts++ {
outputBytes, err = exec.Command("gcloud", "compute", "addresses", "create",
name, "--project", TestContext.CloudConfig.ProjectID,
"--region", region, "-q").CombinedOutput()
if err == nil {
break
}
glog.Errorf("output from failed attempt to create static IP: %s", outputBytes)
time.Sleep(time.Duration(5*attempts) * time.Second)
}
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return "", fmt.Errorf("failed to create static IP: %s", outputBytes)
}
output := string(outputBytes)
if strings.Contains(output, "RESERVED") {
r := regexp.MustCompile("[0-9]+\\.[0-9]+\\.[0-9]+\\.[0-9]+")
staticIP := r.FindString(output)
if staticIP == "" {
return "", fmt.Errorf("static IP not found in gcloud command output: %v", output)
} else {
return staticIP, nil
}
} else {
return "", fmt.Errorf("static IP %q could not be reserved: %v", name, output)
}
}
func DeleteGCEStaticIP(name string) error {
// Mirrors CreateGCEStaticIP above, but with "delete", e.g.:
// gcloud compute --project "abshah-kubernetes-001" addresses delete "test-static-ip" --region "us-central1" -q
region, err := gce.GetGCERegion(TestContext.CloudConfig.Zone)
if err != nil {
return fmt.Errorf("failed to convert zone to region: %v", err)
}
glog.Infof("Deleting static IP with name %q in project %q in region %q", name, TestContext.CloudConfig.ProjectID, region)
outputBytes, err := exec.Command("gcloud", "compute", "addresses", "delete",
name, "--project", TestContext.CloudConfig.ProjectID,
"--region", region, "-q").CombinedOutput()
if err != nil {
// Ditch the error, since the stderr in the output is what actually contains
// any useful info.
return fmt.Errorf("failed to delete static IP %q: %v", name, string(outputBytes))
}
return nil
}
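// Example (a minimal sketch of the intended pairing; the IP name is illustrative):
//
//   ip, err := CreateGCEStaticIP("e2e-static-ip")
//   ExpectNoError(err)
//   defer func() { ExpectNoError(DeleteGCEStaticIP("e2e-static-ip")) }()
//   // ip now holds the reserved address, e.g. "104.197.143.7".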

View file

@ -0,0 +1,885 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"context"
"encoding/json"
"fmt"
"sort"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/prometheus/common/model"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/metrics"
)
// KubeletLatencyMetric stores a latency metric scraped from the kubelet server's /metrics endpoint.
// TODO: Get some more structure around the metrics and this type
type KubeletLatencyMetric struct {
// eg: list, info, create
Operation string
// eg: sync_pods, pod_worker
Method string
// 0 <= quantile <=1, e.g. 0.95 is 95%tile, 0.5 is median.
Quantile float64
Latency time.Duration
}
// KubeletLatencyMetrics implements sort.Interface for []KubeletLatencyMetric based on
// the latency field.
type KubeletLatencyMetrics []KubeletLatencyMetric
func (a KubeletLatencyMetrics) Len() int { return len(a) }
func (a KubeletLatencyMetrics) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a KubeletLatencyMetrics) Less(i, j int) bool { return a[i].Latency > a[j].Latency }
// If an apiserver client is passed in, the function tries to get kubelet metrics through
// the metrics grabber; otherwise it fetches the kubelet metrics directly from the node.
func getKubeletMetricsFromNode(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
if c == nil {
return metrics.GrabKubeletMetricsWithoutProxy(nodeName)
}
grabber, err := metrics.NewMetricsGrabber(c, true, false, false, false)
if err != nil {
return metrics.KubeletMetrics{}, err
}
return grabber.GrabFromKubelet(nodeName)
}
// getKubeletMetrics gets all metrics in the kubelet subsystem from the specified node and
// trims the subsystem prefix.
func getKubeletMetrics(c clientset.Interface, nodeName string) (metrics.KubeletMetrics, error) {
ms, err := getKubeletMetricsFromNode(c, nodeName)
if err != nil {
return metrics.KubeletMetrics{}, err
}
kubeletMetrics := make(metrics.KubeletMetrics)
for name, samples := range ms {
const prefix = kubeletmetrics.KubeletSubsystem + "_"
if !strings.HasPrefix(name, prefix) {
// Not a kubelet metric.
continue
}
method := strings.TrimPrefix(name, prefix)
kubeletMetrics[method] = samples
}
return kubeletMetrics, nil
}
// GetKubeletLatencyMetrics gets all latency-related kubelet metrics. Note that the
// KubeletMetrics passed in should not contain the subsystem prefix.
func GetKubeletLatencyMetrics(ms metrics.KubeletMetrics) KubeletLatencyMetrics {
latencyMethods := sets.NewString(
kubeletmetrics.PodWorkerLatencyKey,
kubeletmetrics.PodWorkerStartLatencyKey,
kubeletmetrics.SyncPodsLatencyKey,
kubeletmetrics.PodStartLatencyKey,
kubeletmetrics.PodStatusLatencyKey,
kubeletmetrics.ContainerManagerOperationsKey,
kubeletmetrics.DockerOperationsLatencyKey,
kubeletmetrics.PLEGRelistLatencyKey,
)
var latencyMetrics KubeletLatencyMetrics
for method, samples := range ms {
if !latencyMethods.Has(method) {
continue
}
for _, sample := range samples {
latency := sample.Value
operation := string(sample.Metric["operation_type"])
var quantile float64
if val, ok := sample.Metric[model.QuantileLabel]; ok {
var err error
if quantile, err = strconv.ParseFloat(string(val), 64); err != nil {
continue
}
}
latencyMetrics = append(latencyMetrics, KubeletLatencyMetric{
Operation: operation,
Method: method,
Quantile: quantile,
Latency: time.Duration(int64(latency)) * time.Microsecond,
})
}
}
return latencyMetrics
}
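// Example (sketch): scraping one node and logging every latency metric above a
// threshold, much like HighLatencyKubeletOperations below:
//
//   ms, _ := getKubeletMetrics(c, nodeName)
//   for _, m := range GetKubeletLatencyMetrics(ms) {
//       if m.Latency > 10*time.Second {
//           Logf("slow %s/%s (q=%v): %v", m.Method, m.Operation, m.Quantile, m.Latency)
//       }
//   }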
// RuntimeOperationMonitor is a tool for getting and parsing docker operation metrics.
type RuntimeOperationMonitor struct {
client clientset.Interface
nodesRuntimeOps map[string]NodeRuntimeOperationErrorRate
}
// NodeRuntimeOperationErrorRate is the runtime operation error rate on one node.
type NodeRuntimeOperationErrorRate map[string]*RuntimeOperationErrorRate
// RuntimeOperationErrorRate is the error rate of a specified runtime operation.
type RuntimeOperationErrorRate struct {
TotalNumber float64
ErrorRate float64
TimeoutRate float64
}
func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor {
m := &RuntimeOperationMonitor{
client: c,
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
}
nodes, err := m.client.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
}
for _, node := range nodes.Items {
m.nodesRuntimeOps[node.Name] = make(NodeRuntimeOperationErrorRate)
}
// Initialize the runtime operation error rate
m.GetRuntimeOperationErrorRate()
return m
}
// GetRuntimeOperationErrorRate gets runtime operation records from kubelet metrics and
// calculates the error rates of all runtime operations.
func (m *RuntimeOperationMonitor) GetRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate {
for node := range m.nodesRuntimeOps {
nodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
if err != nil {
Logf("GetRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
continue
}
m.nodesRuntimeOps[node] = nodeResult
}
return m.nodesRuntimeOps
}
// GetLatestRuntimeOperationErrorRate gets the latest error and timeout rates, relative to the last observed RuntimeOperationErrorRate.
func (m *RuntimeOperationMonitor) GetLatestRuntimeOperationErrorRate() map[string]NodeRuntimeOperationErrorRate {
result := make(map[string]NodeRuntimeOperationErrorRate)
for node := range m.nodesRuntimeOps {
result[node] = make(NodeRuntimeOperationErrorRate)
oldNodeResult := m.nodesRuntimeOps[node]
curNodeResult, err := getNodeRuntimeOperationErrorRate(m.client, node)
if err != nil {
Logf("GetLatestRuntimeOperationErrorRate: unable to get kubelet metrics from node %q: %v", node, err)
continue
}
for op, cur := range curNodeResult {
t := *cur
if old, found := oldNodeResult[op]; found {
t.ErrorRate = (t.ErrorRate*t.TotalNumber - old.ErrorRate*old.TotalNumber) / (t.TotalNumber - old.TotalNumber)
t.TimeoutRate = (t.TimeoutRate*t.TotalNumber - old.TimeoutRate*old.TotalNumber) / (t.TotalNumber - old.TotalNumber)
t.TotalNumber -= old.TotalNumber
}
result[node][op] = &t
}
m.nodesRuntimeOps[node] = curNodeResult
}
return result
}
// FormatRuntimeOperationErrorRate formats the runtime operation error rate to string.
func FormatRuntimeOperationErrorRate(nodesResult map[string]NodeRuntimeOperationErrorRate) string {
lines := []string{}
for node, nodeResult := range nodesResult {
lines = append(lines, fmt.Sprintf("node %q runtime operation error rate:", node))
for op, result := range nodeResult {
line := fmt.Sprintf("operation %q: total - %.0f; error rate - %f; timeout rate - %f", op,
result.TotalNumber, result.ErrorRate, result.TimeoutRate)
lines = append(lines, line)
}
lines = append(lines, fmt.Sprintln())
}
return strings.Join(lines, "\n")
}
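// Example (a minimal sketch of the monitor lifecycle; "c" is an assumed clientset):
//
//   m := NewRuntimeOperationMonitor(c) // snapshots the initial counters
//   // ... run the test workload ...
//   rates := m.GetLatestRuntimeOperationErrorRate()
//   Logf("%s", FormatRuntimeOperationErrorRate(rates))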
// getNodeRuntimeOperationErrorRate gets runtime operation error rate from specified node.
func getNodeRuntimeOperationErrorRate(c clientset.Interface, node string) (NodeRuntimeOperationErrorRate, error) {
result := make(NodeRuntimeOperationErrorRate)
ms, err := getKubeletMetrics(c, node)
if err != nil {
return result, err
}
// If no corresponding metrics are found, the returned samples will be empty. Then the following
// loop will be skipped automatically.
allOps := ms[kubeletmetrics.DockerOperationsKey]
errOps := ms[kubeletmetrics.DockerOperationsErrorsKey]
timeoutOps := ms[kubeletmetrics.DockerOperationsTimeoutKey]
for _, sample := range allOps {
operation := string(sample.Metric["operation_type"])
result[operation] = &RuntimeOperationErrorRate{TotalNumber: float64(sample.Value)}
}
for _, sample := range errOps {
operation := string(sample.Metric["operation_type"])
// We should always find the corresponding item; the check is just in case.
if _, found := result[operation]; found {
result[operation].ErrorRate = float64(sample.Value) / result[operation].TotalNumber
}
}
for _, sample := range timeoutOps {
operation := string(sample.Metric["operation_type"])
if _, found := result[operation]; found {
result[operation].TimeoutRate = float64(sample.Value) / result[operation].TotalNumber
}
}
return result, nil
}
// HighLatencyKubeletOperations logs and counts the high latency metrics exported by the kubelet server via /metrics.
func HighLatencyKubeletOperations(c clientset.Interface, threshold time.Duration, nodeName string, logFunc func(fmt string, args ...interface{})) (KubeletLatencyMetrics, error) {
ms, err := getKubeletMetrics(c, nodeName)
if err != nil {
return KubeletLatencyMetrics{}, err
}
latencyMetrics := GetKubeletLatencyMetrics(ms)
sort.Sort(latencyMetrics)
var badMetrics KubeletLatencyMetrics
logFunc("\nLatency metrics for node %v", nodeName)
for _, m := range latencyMetrics {
if m.Latency > threshold {
badMetrics = append(badMetrics, m)
Logf("%+v", m)
}
}
return badMetrics, nil
}
// getContainerInfo contacts kubelet for the container information. The "Stats"
// in the returned ContainerInfo is subject to the requirements in statsRequest.
// TODO: This function uses the deprecated kubelet stats API; it should be
// removed.
func getContainerInfo(c clientset.Interface, nodeName string, req *kubeletstats.StatsRequest) (map[string]cadvisorapi.ContainerInfo, error) {
reqBody, err := json.Marshal(req)
if err != nil {
return nil, err
}
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
var data []byte
if subResourceProxyAvailable {
data, err = c.Core().RESTClient().Post().
Context(ctx).
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/container").
SetHeader("Content-Type", "application/json").
Body(reqBody).
Do().Raw()
} else {
data, err = c.Core().RESTClient().Post().
Context(ctx).
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/container").
SetHeader("Content-Type", "application/json").
Body(reqBody).
Do().Raw()
}
if err != nil {
return nil, err
}
var containers map[string]cadvisorapi.ContainerInfo
err = json.Unmarshal(data, &containers)
if err != nil {
return nil, err
}
return containers, nil
}
// getOneTimeResourceUsageOnNode queries the node's /stats/container endpoint
// and returns the resource usage of all containerNames for the past
// cpuInterval.
// The acceptable range of the interval is 2s~120s. Be warned that as the
// interval (and #containers) increases, the size of kubelet's response
// could be significant. E.g., the 60s interval stats for ~20 containers is
// ~1.5MB. Don't hammer the node with frequent, heavy requests.
//
// cadvisor records cumulative cpu usage in nanoseconds, so we need to have two
// stats points to compute the cpu usage over the interval. Assuming cadvisor
// polls every second, we'd need to get N stats points for N-second interval.
// Note that this is an approximation and may not be accurate, hence we also
// write the actual interval used for the calculation (based on the timestamps of
// the stats points) in ContainerResourceUsage.CPUInterval.
//
// containerNames is a function returning a collection of container names the user
// is interested in. expectMissingContainers is a flag which says whether the test
// should fail if one of the containers listed by containerNames is missing on any node
// (useful e.g. when looking for system containers or daemons). If set to true, the
// function is more forgiving and ignores missing containers.
// TODO: This function relies on the deprecated kubelet stats API and should be
// removed and/or rewritten.
func getOneTimeResourceUsageOnNode(
c clientset.Interface,
nodeName string,
cpuInterval time.Duration,
containerNames func() []string,
expectMissingContainers bool,
) (ResourceUsagePerContainer, error) {
const (
// cadvisor records stats about every second.
cadvisorStatsPollingIntervalInSeconds float64 = 1.0
// cadvisor caches up to 2 minutes of stats (configured by kubelet).
maxNumStatsToRequest int = 120
)
numStats := int(float64(cpuInterval.Seconds()) / cadvisorStatsPollingIntervalInSeconds)
if numStats < 2 || numStats > maxNumStatsToRequest {
return nil, fmt.Errorf("numStats needs to be > 1 and < %d", maxNumStatsToRequest)
}
// Get information of all containers on the node.
containerInfos, err := getContainerInfo(c, nodeName, &kubeletstats.StatsRequest{
ContainerName: "/",
NumStats: numStats,
Subcontainers: true,
})
if err != nil {
return nil, err
}
f := func(name string, oldStats, newStats *cadvisorapi.ContainerStats) *ContainerResourceUsage {
return &ContainerResourceUsage{
Name: name,
Timestamp: newStats.Timestamp,
CPUUsageInCores: float64(newStats.Cpu.Usage.Total-oldStats.Cpu.Usage.Total) / float64(newStats.Timestamp.Sub(oldStats.Timestamp).Nanoseconds()),
MemoryUsageInBytes: newStats.Memory.Usage,
MemoryWorkingSetInBytes: newStats.Memory.WorkingSet,
MemoryRSSInBytes: newStats.Memory.RSS,
CPUInterval: newStats.Timestamp.Sub(oldStats.Timestamp),
}
}
// Process container infos that are relevant to us.
containers := containerNames()
usageMap := make(ResourceUsagePerContainer, len(containers))
for _, name := range containers {
info, ok := containerInfos[name]
if !ok {
if !expectMissingContainers {
return nil, fmt.Errorf("missing info for container %q on node %q", name, nodeName)
}
continue
}
first := info.Stats[0]
last := info.Stats[len(info.Stats)-1]
usageMap[name] = f(name, first, last)
}
return usageMap, nil
}
func getNodeStatsSummary(c clientset.Interface, nodeName string) (*stats.Summary, error) {
subResourceProxyAvailable, err := ServerVersionGTE(subResourceServiceAndNodeProxyVersion, c.Discovery())
if err != nil {
return nil, err
}
var data []byte
if subResourceProxyAvailable {
data, err = c.Core().RESTClient().Get().
Resource("nodes").
SubResource("proxy").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
SetHeader("Content-Type", "application/json").
Do().Raw()
} else {
data, err = c.Core().RESTClient().Get().
Prefix("proxy").
Resource("nodes").
Name(fmt.Sprintf("%v:%v", nodeName, ports.KubeletPort)).
Suffix("stats/summary").
SetHeader("Content-Type", "application/json").
Do().Raw()
}
if err != nil {
return nil, err
}
var summary *stats.Summary
err = json.Unmarshal(data, &summary)
if err != nil {
return nil, err
}
return summary, nil
}
func getSystemContainerStats(summary *stats.Summary) map[string]*stats.ContainerStats {
statsList := summary.Node.SystemContainers
statsMap := make(map[string]*stats.ContainerStats)
for i := range statsList {
statsMap[statsList[i].Name] = &statsList[i]
}
// Create a root container stats using information available in
// stats.NodeStats. This is necessary since it is a different type.
statsMap[rootContainerName] = &stats.ContainerStats{
CPU: summary.Node.CPU,
Memory: summary.Node.Memory,
}
return statsMap
}
const (
rootContainerName = "/"
)
// TargetContainers returns a list of containers for which we want to collect resource usage.
func TargetContainers() []string {
return []string{
rootContainerName,
stats.SystemContainerRuntime,
stats.SystemContainerKubelet,
}
}
type ContainerResourceUsage struct {
Name string
Timestamp time.Time
CPUUsageInCores float64
MemoryUsageInBytes uint64
MemoryWorkingSetInBytes uint64
MemoryRSSInBytes uint64
// The interval used to calculate CPUUsageInCores.
CPUInterval time.Duration
}
func (r *ContainerResourceUsage) isStrictlyGreaterThan(rhs *ContainerResourceUsage) bool {
return r.CPUUsageInCores > rhs.CPUUsageInCores && r.MemoryWorkingSetInBytes > rhs.MemoryWorkingSetInBytes
}
type ResourceUsagePerContainer map[string]*ContainerResourceUsage
type ResourceUsagePerNode map[string]ResourceUsagePerContainer
func formatResourceUsageStats(nodeName string, containerStats ResourceUsagePerContainer) string {
// Example output:
//
// Resource usage for node "e2e-test-foo-node-abcde":
// container cpu(cores) memory(MB)
// "/" 0.363 2942.09
// "/docker-daemon" 0.088 521.80
// "/kubelet" 0.086 424.37
// "/system" 0.007 119.88
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
fmt.Fprintf(w, "container\tcpu(cores)\tmemory_working_set(MB)\tmemory_rss(MB)\n")
for name, s := range containerStats {
fmt.Fprintf(w, "%q\t%.3f\t%.2f\t%.2f\n", name, s.CPUUsageInCores, float64(s.MemoryWorkingSetInBytes)/(1024*1024), float64(s.MemoryRSSInBytes)/(1024*1024))
}
w.Flush()
return fmt.Sprintf("Resource usage on node %q:\n%s", nodeName, buf.String())
}
type uint64arr []uint64
func (a uint64arr) Len() int { return len(a) }
func (a uint64arr) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a uint64arr) Less(i, j int) bool { return a[i] < a[j] }
type usageDataPerContainer struct {
cpuData []float64
memUseData []uint64
memWorkSetData []uint64
}
func GetKubeletHeapStats(c clientset.Interface, nodeName string) (string, error) {
client, err := NodeProxyRequest(c, nodeName, "debug/pprof/heap")
if err != nil {
return "", err
}
raw, errRaw := client.Raw()
if errRaw != nil {
return "", err
}
stats := string(raw)
// Only dumping the runtime.MemStats numbers to avoid polluting the log.
numLines := 23
lines := strings.Split(stats, "\n")
return strings.Join(lines[len(lines)-numLines:], "\n"), nil
}
func PrintAllKubeletPods(c clientset.Interface, nodeName string) {
podList, err := GetKubeletPods(c, nodeName)
if err != nil {
Logf("Unable to retrieve kubelet pods for node %v: %v", nodeName, err)
return
}
for _, p := range podList.Items {
Logf("%v from %v started at %v (%d container statuses recorded)", p.Name, p.Namespace, p.Status.StartTime, len(p.Status.ContainerStatuses))
for _, c := range p.Status.ContainerStatuses {
Logf("\tContainer %v ready: %v, restart count %v",
c.Name, c.Ready, c.RestartCount)
}
}
}
func computeContainerResourceUsage(name string, oldStats, newStats *stats.ContainerStats) *ContainerResourceUsage {
return &ContainerResourceUsage{
Name: name,
Timestamp: newStats.CPU.Time.Time,
CPUUsageInCores: float64(*newStats.CPU.UsageCoreNanoSeconds-*oldStats.CPU.UsageCoreNanoSeconds) / float64(newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time).Nanoseconds()),
MemoryUsageInBytes: *newStats.Memory.UsageBytes,
MemoryWorkingSetInBytes: *newStats.Memory.WorkingSetBytes,
MemoryRSSInBytes: *newStats.Memory.RSSBytes,
CPUInterval: newStats.CPU.Time.Time.Sub(oldStats.CPU.Time.Time),
}
}
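// For example (sketch): if UsageCoreNanoSeconds grew from 1.0e9 to 3.0e9 over a 10s
// window (1.0e10 wall-clock nanoseconds), CPUUsageInCores = (3.0e9-1.0e9)/1.0e10 = 0.2,
// i.e. 20% of one core averaged over CPUInterval.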
// resourceCollector periodically polls the node, collects stats for a given
// list of containers, and computes and caches resource usage up to
// maxEntriesPerContainer for each container.
type resourceCollector struct {
lock sync.RWMutex
node string
containers []string
client clientset.Interface
buffers map[string][]*ContainerResourceUsage
pollingInterval time.Duration
stopCh chan struct{}
}
func newResourceCollector(c clientset.Interface, nodeName string, containerNames []string, pollingInterval time.Duration) *resourceCollector {
buffers := make(map[string][]*ContainerResourceUsage)
return &resourceCollector{
node: nodeName,
containers: containerNames,
client: c,
buffers: buffers,
pollingInterval: pollingInterval,
}
}
// Start starts a goroutine to poll the node every pollingInterval.
func (r *resourceCollector) Start() {
r.stopCh = make(chan struct{}, 1)
// Keep the last observed stats for comparison.
oldStats := make(map[string]*stats.ContainerStats)
go wait.Until(func() { r.collectStats(oldStats) }, r.pollingInterval, r.stopCh)
}
// Stop sends a signal to terminate the stats collecting goroutine.
func (r *resourceCollector) Stop() {
close(r.stopCh)
}
// collectStats gets the latest stats from kubelet stats summary API, computes
// the resource usage, and pushes it to the buffer.
func (r *resourceCollector) collectStats(oldStatsMap map[string]*stats.ContainerStats) {
summary, err := getNodeStatsSummary(r.client, r.node)
if err != nil {
Logf("Error getting node stats summary on %q, err: %v", r.node, err)
return
}
cStatsMap := getSystemContainerStats(summary)
r.lock.Lock()
defer r.lock.Unlock()
for _, name := range r.containers {
cStats, ok := cStatsMap[name]
if !ok {
Logf("Missing info/stats for container %q on node %q", name, r.node)
return
}
if oldStats, ok := oldStatsMap[name]; ok {
if oldStats.CPU.Time.Equal(cStats.CPU.Time) {
// No change -> skip this stat.
continue
}
r.buffers[name] = append(r.buffers[name], computeContainerResourceUsage(name, oldStats, cStats))
}
// Update the old stats.
oldStatsMap[name] = cStats
}
}
func (r *resourceCollector) GetLatest() (ResourceUsagePerContainer, error) {
r.lock.RLock()
defer r.lock.RUnlock()
stats := make(ResourceUsagePerContainer)
for _, name := range r.containers {
contStats, ok := r.buffers[name]
if !ok || len(contStats) == 0 {
return nil, fmt.Errorf("Resource usage on node %q is not ready yet", r.node)
}
stats[name] = contStats[len(contStats)-1]
}
return stats, nil
}
// Reset frees the stats and starts over.
func (r *resourceCollector) Reset() {
r.lock.Lock()
defer r.lock.Unlock()
for _, name := range r.containers {
r.buffers[name] = []*ContainerResourceUsage{}
}
}
type resourceUsageByCPU []*ContainerResourceUsage
func (r resourceUsageByCPU) Len() int { return len(r) }
func (r resourceUsageByCPU) Swap(i, j int) { r[i], r[j] = r[j], r[i] }
func (r resourceUsageByCPU) Less(i, j int) bool { return r[i].CPUUsageInCores < r[j].CPUUsageInCores }
// The percentiles to report.
var percentiles = [...]float64{0.05, 0.20, 0.50, 0.70, 0.90, 0.95, 0.99}
// GetBasicCPUStats returns the percentiles of the CPU usage in cores for
// containerName. This method examines all data currently in the buffer.
func (r *resourceCollector) GetBasicCPUStats(containerName string) map[float64]float64 {
r.lock.RLock()
defer r.lock.RUnlock()
result := make(map[float64]float64, len(percentiles))
usages := r.buffers[containerName]
sort.Sort(resourceUsageByCPU(usages))
for _, q := range percentiles {
index := int(float64(len(usages))*q) - 1
if index < 0 {
// We don't have enough data.
result[q] = 0
continue
}
result[q] = usages[index].CPUUsageInCores
}
return result
}
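// Example (sketch): with 10 buffered samples, the 0.50 quantile reads index
// int(10*0.50)-1 = 4, i.e. the 5th-smallest CPU usage; when there are too few samples
// for a quantile (index < 0), that percentile is reported as 0.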
// ResourceMonitor manages a resourceCollector per node.
type ResourceMonitor struct {
client clientset.Interface
containers []string
pollingInterval time.Duration
collectors map[string]*resourceCollector
}
func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingInterval time.Duration) *ResourceMonitor {
return &ResourceMonitor{
containers: containerNames,
client: c,
pollingInterval: pollingInterval,
}
}
func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes
nodes, err := r.client.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Failf("ResourceMonitor: unable to get list of nodes: %v", err)
}
r.collectors = make(map[string]*resourceCollector)
for _, node := range nodes.Items {
collector := newResourceCollector(r.client, node.Name, r.containers, r.pollingInterval)
r.collectors[node.Name] = collector
collector.Start()
}
}
func (r *ResourceMonitor) Stop() {
for _, collector := range r.collectors {
collector.Stop()
}
}
func (r *ResourceMonitor) Reset() {
for _, collector := range r.collectors {
collector.Reset()
}
}
func (r *ResourceMonitor) LogLatest() {
summary, err := r.GetLatest()
if err != nil {
Logf("%v", err)
}
Logf("%s", r.FormatResourceUsage(summary))
}
func (r *ResourceMonitor) FormatResourceUsage(s ResourceUsagePerNode) string {
summary := []string{}
for node, usage := range s {
summary = append(summary, formatResourceUsageStats(node, usage))
}
return strings.Join(summary, "\n")
}
func (r *ResourceMonitor) GetLatest() (ResourceUsagePerNode, error) {
result := make(ResourceUsagePerNode)
errs := []error{}
for key, collector := range r.collectors {
s, err := collector.GetLatest()
if err != nil {
errs = append(errs, err)
continue
}
result[key] = s
}
return result, utilerrors.NewAggregate(errs)
}
func (r *ResourceMonitor) GetMasterNodeLatest(usagePerNode ResourceUsagePerNode) ResourceUsagePerNode {
result := make(ResourceUsagePerNode)
var masterUsage ResourceUsagePerContainer
var nodesUsage []ResourceUsagePerContainer
for node, usage := range usagePerNode {
if strings.HasSuffix(node, "master") {
masterUsage = usage
} else {
nodesUsage = append(nodesUsage, usage)
}
}
nodeAvgUsage := make(ResourceUsagePerContainer)
for _, nodeUsage := range nodesUsage {
for c, usage := range nodeUsage {
if _, found := nodeAvgUsage[c]; !found {
nodeAvgUsage[c] = &ContainerResourceUsage{Name: usage.Name}
}
nodeAvgUsage[c].CPUUsageInCores += usage.CPUUsageInCores
nodeAvgUsage[c].MemoryUsageInBytes += usage.MemoryUsageInBytes
nodeAvgUsage[c].MemoryWorkingSetInBytes += usage.MemoryWorkingSetInBytes
nodeAvgUsage[c].MemoryRSSInBytes += usage.MemoryRSSInBytes
}
}
for c := range nodeAvgUsage {
nodeAvgUsage[c].CPUUsageInCores /= float64(len(nodesUsage))
nodeAvgUsage[c].MemoryUsageInBytes /= uint64(len(nodesUsage))
nodeAvgUsage[c].MemoryWorkingSetInBytes /= uint64(len(nodesUsage))
nodeAvgUsage[c].MemoryRSSInBytes /= uint64(len(nodesUsage))
}
result["master"] = masterUsage
result["node"] = nodeAvgUsage
return result
}
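// Example (a minimal sketch of the ResourceMonitor lifecycle; "c" is an assumed clientset):
//
//   rm := NewResourceMonitor(c, TargetContainers(), 10*time.Second)
//   rm.Start()
//   defer rm.Stop()
//   // ... run the test workload ...
//   rm.LogLatest()
//   rm.LogCPUSummary()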
// ContainersCPUSummary is indexed by the container name with each entry a
// (percentile, value) map.
type ContainersCPUSummary map[string]map[float64]float64
// NodesCPUSummary is indexed by the node name with each entry a
// ContainersCPUSummary map.
type NodesCPUSummary map[string]ContainersCPUSummary
func (r *ResourceMonitor) FormatCPUSummary(summary NodesCPUSummary) string {
// Example output for a node (the percentiles may differ):
// CPU usage of containers on node "e2e-test-foo-node-0vj7":
// container 5th% 50th% 90th% 95th%
// "/" 0.051 0.159 0.387 0.455
// "/runtime 0.000 0.000 0.146 0.166
// "/kubelet" 0.036 0.053 0.091 0.154
// "/misc" 0.001 0.001 0.001 0.002
var summaryStrings []string
var header []string
header = append(header, "container")
for _, p := range percentiles {
header = append(header, fmt.Sprintf("%.0fth%%", p*100))
}
for nodeName, containers := range summary {
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
fmt.Fprintf(w, "%s\n", strings.Join(header, "\t"))
for _, containerName := range TargetContainers() {
var s []string
s = append(s, fmt.Sprintf("%q", containerName))
data, ok := containers[containerName]
for _, p := range percentiles {
value := "N/A"
if ok {
value = fmt.Sprintf("%.3f", data[p])
}
s = append(s, value)
}
fmt.Fprintf(w, "%s\n", strings.Join(s, "\t"))
}
w.Flush()
summaryStrings = append(summaryStrings, fmt.Sprintf("CPU usage of containers on node %q:\n%s", nodeName, buf.String()))
}
return strings.Join(summaryStrings, "\n")
}
func (r *ResourceMonitor) LogCPUSummary() {
summary := r.GetCPUSummary()
Logf("%s", r.FormatCPUSummary(summary))
}
func (r *ResourceMonitor) GetCPUSummary() NodesCPUSummary {
result := make(NodesCPUSummary)
for nodeName, collector := range r.collectors {
result[nodeName] = make(ContainersCPUSummary)
for _, containerName := range TargetContainers() {
data := collector.GetBasicCPUStats(containerName)
result[nodeName][containerName] = data
}
}
return result
}
func (r *ResourceMonitor) GetMasterNodeCPUSummary(summaryPerNode NodesCPUSummary) NodesCPUSummary {
result := make(NodesCPUSummary)
var masterSummary ContainersCPUSummary
var nodesSummaries []ContainersCPUSummary
for node, summary := range summaryPerNode {
if strings.HasSuffix(node, "master") {
masterSummary = summary
} else {
nodesSummaries = append(nodesSummaries, summary)
}
}
nodeAvgSummary := make(ContainersCPUSummary)
for _, nodeSummary := range nodesSummaries {
for c, summary := range nodeSummary {
if _, found := nodeAvgSummary[c]; !found {
nodeAvgSummary[c] = map[float64]float64{}
}
for perc, value := range summary {
nodeAvgSummary[c][perc] += value
}
}
}
for c := range nodeAvgSummary {
for perc := range nodeAvgSummary[c] {
nodeAvgSummary[c][perc] /= float64(len(nodesSummaries))
}
}
result["master"] = masterSummary
result["node"] = nodeAvgSummary
return result
}

View file

@ -0,0 +1,269 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)
const (
// Minimal period between polls of component log sizes
pollingPeriod = 60 * time.Second
workersNo = 5
kubeletLogsPath = "/var/log/kubelet.log"
kubeProxyLogsPath = "/var/log/kube-proxy.log"
kubeAddonsLogsPath = "/var/log/kube-addons.log"
kubeMasterAddonsLogsPath = "/var/log/kube-master-addons.log"
apiServerLogsPath = "/var/log/kube-apiserver.log"
controllersLogsPath = "/var/log/kube-controller-manager.log"
schedulerLogsPath = "/var/log/kube-scheduler.log"
)
var (
nodeLogsToCheck = []string{kubeletLogsPath, kubeProxyLogsPath}
masterLogsToCheck = []string{kubeletLogsPath, kubeAddonsLogsPath, kubeMasterAddonsLogsPath,
apiServerLogsPath, controllersLogsPath, schedulerLogsPath}
)
// TimestampedSize contains a size together with a time of measurement.
type TimestampedSize struct {
timestamp time.Time
size int
}
// LogSizeGatherer is a worker which grabs a WorkItem from the channel and does assigned work.
type LogSizeGatherer struct {
stopChannel chan bool
data *LogsSizeData
wg *sync.WaitGroup
workChannel chan WorkItem
}
// LogsSizeVerifier gathers data about log file sizes from master and node machines.
// It oversees <workersNo> workers that do the gathering.
type LogsSizeVerifier struct {
client clientset.Interface
stopChannel chan bool
// data stores LogSizeData grouped per IP and log_path
data *LogsSizeData
masterAddress string
nodeAddresses []string
wg sync.WaitGroup
workChannel chan WorkItem
workers []*LogSizeGatherer
}
type SingleLogSummary struct {
AverageGenerationRate int
NumberOfProbes int
}
type LogSizeDataTimeseries map[string]map[string][]TimestampedSize
// node -> file -> data
type LogsSizeDataSummary map[string]map[string]SingleLogSummary
// TODO: make sure that we don't need locking here
func (s *LogsSizeDataSummary) PrintHumanReadable() string {
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
fmt.Fprintf(w, "host\tlog_file\taverage_rate (B/s)\tnumber_of_probes\n")
for k, v := range *s {
fmt.Fprintf(w, "%v\t\t\t\n", k)
for path, data := range v {
fmt.Fprintf(w, "\t%v\t%v\t%v\n", path, data.AverageGenerationRate, data.NumberOfProbes)
}
}
w.Flush()
return buf.String()
}
func (s *LogsSizeDataSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
}
type LogsSizeData struct {
data LogSizeDataTimeseries
lock sync.Mutex
}
// WorkItem is a command for a worker that contains an IP of machine from which we want to
// gather data and paths to all files we're interested in.
type WorkItem struct {
ip string
paths []string
backoffMultiplier int
}
func prepareData(masterAddress string, nodeAddresses []string) *LogsSizeData {
data := make(LogSizeDataTimeseries)
ips := append(nodeAddresses, masterAddress)
for _, ip := range ips {
data[ip] = make(map[string][]TimestampedSize)
}
return &LogsSizeData{
data: data,
lock: sync.Mutex{},
}
}
func (d *LogsSizeData) AddNewData(ip, path string, timestamp time.Time, size int) {
d.lock.Lock()
defer d.lock.Unlock()
d.data[ip][path] = append(
d.data[ip][path],
TimestampedSize{
timestamp: timestamp,
size: size,
},
)
}
// NewLogsVerifier creates a new LogsSizeVerifier which will stop when stopChannel is closed
func NewLogsVerifier(c clientset.Interface, stopChannel chan bool) *LogsSizeVerifier {
nodeAddresses, err := NodeSSHHosts(c)
ExpectNoError(err)
masterAddress := GetMasterHost() + ":22"
workChannel := make(chan WorkItem, len(nodeAddresses)+1)
workers := make([]*LogSizeGatherer, workersNo)
verifier := &LogsSizeVerifier{
client: c,
stopChannel: stopChannel,
data: prepareData(masterAddress, nodeAddresses),
masterAddress: masterAddress,
nodeAddresses: nodeAddresses,
wg: sync.WaitGroup{},
workChannel: workChannel,
workers: workers,
}
verifier.wg.Add(workersNo)
for i := 0; i < workersNo; i++ {
workers[i] = &LogSizeGatherer{
stopChannel: stopChannel,
data: verifier.data,
wg: &verifier.wg,
workChannel: workChannel,
}
}
return verifier
}
// GetSummary returns a summary (average generation rate and number of probes) of the data gathered by LogSizeVerifier
func (s *LogsSizeVerifier) GetSummary() *LogsSizeDataSummary {
result := make(LogsSizeDataSummary)
for k, v := range s.data.data {
result[k] = make(map[string]SingleLogSummary)
for path, data := range v {
if len(data) > 1 {
last := data[len(data)-1]
first := data[0]
rate := (last.size - first.size) / int(last.timestamp.Sub(first.timestamp)/time.Second)
result[k][path] = SingleLogSummary{
AverageGenerationRate: rate,
NumberOfProbes: len(data),
}
}
}
}
return &result
}
// Run starts log size gathering. It starts a goroutine for every worker and then blocks until stopChannel is closed.
func (v *LogsSizeVerifier) Run() {
v.workChannel <- WorkItem{
ip: v.masterAddress,
paths: masterLogsToCheck,
backoffMultiplier: 1,
}
for _, node := range v.nodeAddresses {
v.workChannel <- WorkItem{
ip: node,
paths: nodeLogsToCheck,
backoffMultiplier: 1,
}
}
for _, worker := range v.workers {
go worker.Run()
}
<-v.stopChannel
v.wg.Wait()
}
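// Example (sketch): the verifier is driven from the outside via stopChannel:
//
//   stopCh := make(chan bool)
//   verifier := NewLogsVerifier(c, stopCh)
//   go verifier.Run()
//   // ... run the test workload ...
//   close(stopCh)
//   Logf("%s", verifier.GetSummary().PrintHumanReadable())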
func (g *LogSizeGatherer) Run() {
for g.Work() {
}
}
func (g *LogSizeGatherer) pushWorkItem(workItem WorkItem) {
select {
case <-time.After(time.Duration(workItem.backoffMultiplier) * pollingPeriod):
g.workChannel <- workItem
case <-g.stopChannel:
return
}
}
// Work does a single unit of work: it tries to take a WorkItem out of the queue, SSHes into
// the given machine, gathers data, writes it to the shared <data> map, and creates a
// goroutine which reinserts the work item into the queue with a <pollingPeriod> delay.
// Returns false if the worker should exit.
func (g *LogSizeGatherer) Work() bool {
var workItem WorkItem
select {
case <-g.stopChannel:
g.wg.Done()
return false
case workItem = <-g.workChannel:
}
sshResult, err := SSH(
fmt.Sprintf("ls -l %v | awk '{print $9, $5}' | tr '\n' ' '", strings.Join(workItem.paths, " ")),
workItem.ip,
TestContext.Provider,
)
if err != nil {
Logf("Error while trying to SSH to %v, skipping probe. Error: %v", workItem.ip, err)
if workItem.backoffMultiplier < 128 {
workItem.backoffMultiplier *= 2
}
go g.pushWorkItem(workItem)
return true
}
workItem.backoffMultiplier = 1
results := strings.Split(sshResult.Stdout, " ")
now := time.Now()
for i := 0; i+1 < len(results); i = i + 2 {
path := results[i]
size, err := strconv.Atoi(results[i+1])
if err != nil {
Logf("Error during conversion to int: %v, skipping data. Error: %v", results[i+1], err)
continue
}
g.data.AddNewData(workItem.ip, path, now, size)
}
go g.pushWorkItem(workItem)
return true
}

View file

@ -0,0 +1,494 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"math"
"sort"
"strconv"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/metrics"
"github.com/prometheus/common/expfmt"
"github.com/prometheus/common/model"
)
const (
// NodeStartupThreshold is a rough estimate of the time allocated for a pod to start on a node.
NodeStartupThreshold = 4 * time.Second
podStartupThreshold time.Duration = 5 * time.Second
// We are setting 1s threshold for apicalls even in small clusters to avoid flakes.
// The problem is that if long GC is happening in small clusters (where we have e.g.
// 1-core master machines) and tests are pretty short, it may consume significant
// portion of CPU and basically stop all the real work.
// Increasing threshold to 1s is within our SLO and should solve this problem.
apiCallLatencyThreshold time.Duration = 1 * time.Second
)
type MetricsForE2E metrics.MetricsCollection
func (m *MetricsForE2E) filterMetrics() {
interestingApiServerMetrics := make(metrics.ApiServerMetrics)
for _, metric := range InterestingApiServerMetrics {
interestingApiServerMetrics[metric] = (*m).ApiServerMetrics[metric]
}
interestingControllerManagerMetrics := make(metrics.ControllerManagerMetrics)
for _, metric := range InterestingControllerManagerMetrics {
interestingControllerManagerMetrics[metric] = (*m).ControllerManagerMetrics[metric]
}
interestingKubeletMetrics := make(map[string]metrics.KubeletMetrics)
for kubelet, grabbed := range (*m).KubeletMetrics {
interestingKubeletMetrics[kubelet] = make(metrics.KubeletMetrics)
for _, metric := range InterestingKubeletMetrics {
interestingKubeletMetrics[kubelet][metric] = grabbed[metric]
}
}
(*m).ApiServerMetrics = interestingApiServerMetrics
(*m).ControllerManagerMetrics = interestingControllerManagerMetrics
(*m).KubeletMetrics = interestingKubeletMetrics
}
func (m *MetricsForE2E) PrintHumanReadable() string {
buf := bytes.Buffer{}
for _, interestingMetric := range InterestingApiServerMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
for _, sample := range (*m).ApiServerMetrics[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
}
}
for _, interestingMetric := range InterestingControllerManagerMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", interestingMetric))
for _, sample := range (*m).ControllerManagerMetrics[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t%v\n", metrics.PrintSample(sample)))
}
}
for kubelet, grabbed := range (*m).KubeletMetrics {
buf.WriteString(fmt.Sprintf("For %v:\n", kubelet))
for _, interestingMetric := range InterestingKubeletMetrics {
buf.WriteString(fmt.Sprintf("\tFor %v:\n", interestingMetric))
for _, sample := range grabbed[interestingMetric] {
buf.WriteString(fmt.Sprintf("\t\t%v\n", metrics.PrintSample(sample)))
}
}
}
return buf.String()
}
func (m *MetricsForE2E) PrintJSON() string {
m.filterMetrics()
return PrettyPrintJSON(*m)
}
var InterestingApiServerMetrics = []string{
"apiserver_request_count",
"apiserver_request_latencies_summary",
"etcd_helper_cache_entry_count",
"etcd_helper_cache_hit_count",
"etcd_helper_cache_miss_count",
"etcd_request_cache_add_latencies_summary",
"etcd_request_cache_get_latencies_summary",
"etcd_request_latencies_summary",
}
var InterestingControllerManagerMetrics = []string{
"garbage_collector_event_processing_latency_microseconds",
"garbage_collector_dirty_processing_latency_microseconds",
"garbage_collector_orphan_processing_latency_microseconds",
}
var InterestingKubeletMetrics = []string{
"kubelet_container_manager_latency_microseconds",
"kubelet_docker_errors",
"kubelet_docker_operations_latency_microseconds",
"kubelet_generate_pod_status_latency_microseconds",
"kubelet_pod_start_latency_microseconds",
"kubelet_pod_worker_latency_microseconds",
"kubelet_pod_worker_start_latency_microseconds",
"kubelet_sync_pods_latency_microseconds",
}
// Dashboard metrics
type LatencyMetric struct {
Perc50 time.Duration `json:"Perc50"`
Perc90 time.Duration `json:"Perc90"`
Perc99 time.Duration `json:"Perc99"`
Perc100 time.Duration `json:"Perc100"`
}
type PodStartupLatency struct {
Latency LatencyMetric `json:"latency"`
}
type SchedulingLatency struct {
Scheduling LatencyMetric `json:"scheduling"`
Binding LatencyMetric `json:"binding"`
Total LatencyMetric `json:"total"`
}
type SaturationTime struct {
TimeToSaturate time.Duration `json:"timeToStaturate"`
NumberOfNodes int `json:"numberOfNodes"`
NumberOfPods int `json:"numberOfPods"`
Throughput float32 `json:"throughput"`
}
type APICall struct {
Resource string `json:"resource"`
Verb string `json:"verb"`
Latency LatencyMetric `json:"latency"`
}
type APIResponsiveness struct {
APICalls []APICall `json:"apicalls"`
}
func (a APIResponsiveness) Len() int { return len(a.APICalls) }
func (a APIResponsiveness) Swap(i, j int) { a.APICalls[i], a.APICalls[j] = a.APICalls[j], a.APICalls[i] }
func (a APIResponsiveness) Less(i, j int) bool {
return a.APICalls[i].Latency.Perc99 < a.APICalls[j].Latency.Perc99
}
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
// Only 0.5, 0.9 and 0.99 quantiles are supported.
func (a *APIResponsiveness) addMetric(resource, verb string, quantile float64, latency time.Duration) {
for i, apicall := range a.APICalls {
if apicall.Resource == resource && apicall.Verb == verb {
a.APICalls[i] = setQuantileAPICall(apicall, quantile, latency)
return
}
}
apicall := setQuantileAPICall(APICall{Resource: resource, Verb: verb}, quantile, latency)
a.APICalls = append(a.APICalls, apicall)
}
// 0 <= quantile <=1 (e.g. 0.95 is 95%tile, 0.5 is median)
// Only 0.5, 0.9 and 0.99 quantiles are supported.
func setQuantileAPICall(apicall APICall, quantile float64, latency time.Duration) APICall {
setQuantile(&apicall.Latency, quantile, latency)
return apicall
}
// Only 0.5, 0.9 and 0.99 quantiles are supported.
func setQuantile(metric *LatencyMetric, quantile float64, latency time.Duration) {
switch quantile {
case 0.5:
metric.Perc50 = latency
case 0.9:
metric.Perc90 = latency
case 0.99:
metric.Perc99 = latency
}
}
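// Example (sketch): three samples for ("pods", "LIST") at quantiles 0.5, 0.9 and 0.99
// fold into a single APICall entry:
//
//   var a APIResponsiveness
//   a.addMetric("pods", "LIST", 0.5, 20*time.Millisecond)
//   a.addMetric("pods", "LIST", 0.9, 80*time.Millisecond)
//   a.addMetric("pods", "LIST", 0.99, 150*time.Millisecond)
//   // a.APICalls[0].Latency now holds Perc50=20ms, Perc90=80ms, Perc99=150ms.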
func readLatencyMetrics(c clientset.Interface) (APIResponsiveness, error) {
var a APIResponsiveness
body, err := getMetrics(c)
if err != nil {
return a, err
}
samples, err := extractMetricSamples(body)
if err != nil {
return a, err
}
ignoredResources := sets.NewString("events")
// TODO: figure out why we're getting non-capitalized proxy and fix this.
ignoredVerbs := sets.NewString("WATCHLIST", "PROXY", "proxy", "CONNECT")
for _, sample := range samples {
// Example line:
// apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908
if sample.Metric[model.MetricNameLabel] != "apiserver_request_latencies_summary" {
continue
}
resource := string(sample.Metric["resource"])
verb := string(sample.Metric["verb"])
if ignoredResources.Has(resource) || ignoredVerbs.Has(verb) {
continue
}
latency := sample.Value
quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
if err != nil {
return a, err
}
a.addMetric(resource, verb, quantile, time.Duration(int64(latency))*time.Microsecond)
}
return a, err
}
// HighLatencyRequests prints the top five summary metrics for request types with the
// highest latency and returns the number of request types above the latency threshold.
func HighLatencyRequests(c clientset.Interface) (int, error) {
metrics, err := readLatencyMetrics(c)
if err != nil {
return 0, err
}
sort.Sort(sort.Reverse(metrics))
badMetrics := 0
top := 5
for _, metric := range metrics.APICalls {
isBad := false
if metric.Latency.Perc99 > apiCallLatencyThreshold {
badMetrics++
isBad = true
}
if top > 0 || isBad {
top--
prefix := ""
if isBad {
prefix = "WARNING "
}
Logf("%vTop latency metric: %+v", prefix, metric)
}
}
// TODO(random-liu): Remove the log when we migrate to new perfdash
Logf("API calls latencies: %s", PrettyPrintJSON(metrics))
// Log perf data
PrintPerfData(ApiCallToPerfData(metrics))
return badMetrics, nil
}
// VerifyPodStartupLatency verifies that the 50th, 90th and 99th percentiles of
// PodStartupLatency are within podStartupThreshold.
func VerifyPodStartupLatency(latency PodStartupLatency) error {
Logf("Pod startup latency: %s", PrettyPrintJSON(latency))
if latency.Latency.Perc50 > podStartupThreshold {
return fmt.Errorf("too high pod startup latency 50th percentile: %v", latency.Latency.Perc50)
}
if latency.Latency.Perc90 > podStartupThreshold {
return fmt.Errorf("too high pod startup latency 90th percentile: %v", latency.Latency.Perc90)
}
if latency.Latency.Perc99 > podStartupThreshold {
return fmt.Errorf("too high pod startup latency 99th percentil: %v", latency.Latency.Perc99)
}
return nil
}
// ResetMetrics resets latency metrics in the apiserver.
func ResetMetrics(c clientset.Interface) error {
Logf("Resetting latency metrics in apiserver...")
body, err := c.Core().RESTClient().Delete().AbsPath("/metrics").DoRaw()
if err != nil {
return err
}
if string(body) != "metrics reset\n" {
return fmt.Errorf("Unexpected response: %q", string(body))
}
return nil
}
// getMetrics retrieves the raw /metrics payload from the apiserver.
func getMetrics(c clientset.Interface) (string, error) {
body, err := c.Core().RESTClient().Get().AbsPath("/metrics").DoRaw()
if err != nil {
return "", err
}
return string(body), nil
}
// getSchedulingLatency retrieves scheduler latency metrics.
func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
result := SchedulingLatency{}
// Check if master Node is registered
nodes, err := c.Core().Nodes().List(v1.ListOptions{})
ExpectNoError(err)
var data string
var masterRegistered = false
for _, node := range nodes.Items {
if strings.HasSuffix(node.Name, "master") {
masterRegistered = true
}
}
if masterRegistered {
ctx, cancel := context.WithTimeout(context.Background(), SingleCallTimeout)
defer cancel()
rawData, err := c.Core().RESTClient().Get().
Context(ctx).
Prefix("proxy").
Namespace(api.NamespaceSystem).
Resource("pods").
Name(fmt.Sprintf("kube-scheduler-%v:%v", TestContext.CloudConfig.MasterName, ports.SchedulerPort)).
Suffix("metrics").
Do().Raw()
ExpectNoError(err)
data = string(rawData)
} else {
// If master is not registered fall back to old method of using SSH.
cmd := "curl http://localhost:10251/metrics"
sshResult, err := SSH(cmd, GetMasterHost()+":22", TestContext.Provider)
if err != nil || sshResult.Code != 0 {
return result, fmt.Errorf("unexpected error (code: %d) in ssh connection to master: %#v", sshResult.Code, err)
}
data = sshResult.Stdout
}
samples, err := extractMetricSamples(data)
if err != nil {
return result, err
}
for _, sample := range samples {
var metric *LatencyMetric
switch sample.Metric[model.MetricNameLabel] {
case "scheduler_scheduling_algorithm_latency_microseconds":
metric = &result.Scheduling
case "scheduler_binding_latency_microseconds":
metric = &result.Binding
case "scheduler_e2e_scheduling_latency_microseconds":
metric = &result.Total
}
if metric == nil {
continue
}
latency := sample.Value
quantile, err := strconv.ParseFloat(string(sample.Metric[model.QuantileLabel]), 64)
if err != nil {
return result, err
}
setQuantile(metric, quantile, time.Duration(int64(latency))*time.Microsecond)
}
return result, nil
}
// VerifySchedulerLatency verifies (currently just by logging them) the scheduling latencies.
func VerifySchedulerLatency(c clientset.Interface) error {
latency, err := getSchedulingLatency(c)
if err != nil {
return err
}
Logf("Scheduling latency: %s", PrettyPrintJSON(latency))
// TODO: Add some reasonable checks once we know more about the values.
return nil
}
func PrettyPrintJSON(metrics interface{}) string {
output := &bytes.Buffer{}
if err := json.NewEncoder(output).Encode(metrics); err != nil {
Logf("Error building encoder: %v", err)
return ""
}
formatted := &bytes.Buffer{}
if err := json.Indent(formatted, output.Bytes(), "", " "); err != nil {
Logf("Error indenting: %v", err)
return ""
}
return string(formatted.Bytes())
}
// extractMetricSamples parses the prometheus metric samples from the input string.
func extractMetricSamples(metricsBlob string) ([]*model.Sample, error) {
dec := expfmt.NewDecoder(strings.NewReader(metricsBlob), expfmt.FmtText)
decoder := expfmt.SampleDecoder{
Dec: dec,
Opts: &expfmt.DecodeOptions{},
}
var samples []*model.Sample
for {
var v model.Vector
if err := decoder.Decode(&v); err != nil {
if err == io.EOF {
// Expected loop termination condition.
return samples, nil
}
return nil, err
}
samples = append(samples, v...)
}
}
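// Illustrative sketch (not in the original): decoding one summary sample of the
// form shown in readLatencyMetrics above.
//
//   blob := `apiserver_request_latencies_summary{resource="namespaces",verb="LIST",quantile="0.99"} 908`
//   samples, err := extractMetricSamples(blob)
//   // On success, samples[0].Value is 908 (microseconds) and the metric labels
//   // carry the resource, verb and quantile.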
// PodLatencyData encapsulates pod startup latency information.
type PodLatencyData struct {
// Name of the pod
Name string
// Node this pod was running on
Node string
// Latency information related to pod startup time
Latency time.Duration
}
type LatencySlice []PodLatencyData
func (a LatencySlice) Len() int { return len(a) }
func (a LatencySlice) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a LatencySlice) Less(i, j int) bool { return a[i].Latency < a[j].Latency }
func ExtractLatencyMetrics(latencies []PodLatencyData) LatencyMetric {
length := len(latencies)
perc50 := latencies[int(math.Ceil(float64(length*50)/100))-1].Latency
perc90 := latencies[int(math.Ceil(float64(length*90)/100))-1].Latency
perc99 := latencies[int(math.Ceil(float64(length*99)/100))-1].Latency
perc100 := latencies[length-1].Latency
return LatencyMetric{Perc50: perc50, Perc90: perc90, Perc99: perc99, Perc100: perc100}
}
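// Usage sketch (assumption: the input must already be sorted ascending, since
// the percentile math above indexes into the slice directly):
//
//   sort.Sort(LatencySlice(latencies))
//   m := ExtractLatencyMetrics(latencies)
//   Logf("perc50: %v, perc99: %v", m.Perc50, m.Perc99)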
// LogSuspiciousLatency logs metrics/docker errors from all nodes that had slow startup times.
// If latencyDataLag is nil, it is populated from latencyData.
func LogSuspiciousLatency(latencyData []PodLatencyData, latencyDataLag []PodLatencyData, nodeCount int, c clientset.Interface) {
if latencyDataLag == nil {
latencyDataLag = latencyData
}
for _, l := range latencyData {
if l.Latency > NodeStartupThreshold {
HighLatencyKubeletOperations(c, 1*time.Second, l.Node, Logf)
}
}
Logf("Approx throughput: %v pods/min",
float64(nodeCount)/(latencyDataLag[len(latencyDataLag)-1].Latency.Minutes()))
}
// testMaximumLatencyValue verifies that the highest latency value is less than
// or equal to the given time.Duration. Since the slice is sorted, the last
// element is always the highest. If that latency exceeds max, Failf is called.
func testMaximumLatencyValue(latencies []PodLatencyData, max time.Duration, name string) {
highestLatency := latencies[len(latencies)-1]
if !(highestLatency.Latency <= max) {
Failf("%s were not all under %s: %#v", name, max.String(), latencies)
}
}
// PrintLatencies logs the slowest 10% of the given (sorted) latencies along
// with their 50th/90th/99th percentiles.
func PrintLatencies(latencies []PodLatencyData, header string) {
metrics := ExtractLatencyMetrics(latencies)
Logf("10%% %s: %v", header, latencies[(len(latencies)*9)/10:])
Logf("perc50: %v, perc90: %v, perc99: %v", metrics.Perc50, metrics.Perc90, metrics.Perc99)
}

View file

@ -0,0 +1,828 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net"
"net/http"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
)
const (
EndpointHttpPort = 8080
EndpointUdpPort = 8081
TestContainerHttpPort = 8080
ClusterHttpPort = 80
ClusterUdpPort = 90
NetexecImageName = "gcr.io/google_containers/netexec:1.7"
HostexecImageName = "gcr.io/google_containers/hostexec:1.2"
testPodName = "test-container-pod"
hostTestPodName = "host-test-container-pod"
nodePortServiceName = "node-port-service"
// wait time between poll attempts of a Service vip and/or nodePort.
// coupled with testTries to produce a net timeout value.
hitEndpointRetryDelay = 2 * time.Second
// Number of retries to hit a given set of endpoints. Needs to be high
// because we verify iptables statistical rr loadbalancing.
testTries = 30
// Maximum number of pods in a test, to make test work in large clusters.
maxNetProxyPodsCount = 10
)
// NewNetworkingTestConfig creates and sets up a new test config helper.
func NewNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name}
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
config.setup(getServiceSelector())
return config
}
// NewCoreNetworkingTestConfig creates and sets up a new test config helper for Node E2E.
func NewCoreNetworkingTestConfig(f *Framework) *NetworkingTestConfig {
config := &NetworkingTestConfig{f: f, Namespace: f.Namespace.Name}
By(fmt.Sprintf("Performing setup for networking test in namespace %v", config.Namespace))
config.setupCore(getServiceSelector())
return config
}
func getServiceSelector() map[string]string {
By("creating a selector")
selectorName := "selector-" + string(uuid.NewUUID())
serviceSelector := map[string]string{
selectorName: "true",
}
return serviceSelector
}
// NetworkingTestConfig is a convenience class around some utility methods
// for testing kubeproxy/networking/services/endpoints.
type NetworkingTestConfig struct {
// TestContainerPod is a test pod running the netexec image. It is capable
// of executing tcp/udp requests against ip:port.
TestContainerPod *v1.Pod
// HostTestContainerPod is a pod running with hostNetworking=true, and the
// hostexec image.
HostTestContainerPod *v1.Pod
// EndpointPods are the pods belonging to the Service created by this
// test config. Each invocation of `setup` creates a service with
// 1 pod per node running the netexecImage.
EndpointPods []*v1.Pod
f *Framework
podClient *PodClient
// NodePortService is a Service with Type=NodePort spanning over all
// endpointPods.
NodePortService *v1.Service
// ExternalAddrs is a list of external IPs of nodes in the cluster.
ExternalAddrs []string
// Nodes is a list of nodes in the cluster.
Nodes []v1.Node
// MaxTries is the number of retries tolerated for tests run against
// endpoints and services created by this config.
MaxTries int
// The ClusterIP of the Service created by this test config.
ClusterIP string
// External ip of first node for use in nodePort testing.
NodeIP string
// The http/udp nodePorts of the Service.
NodeHttpPort int
NodeUdpPort int
// The kubernetes namespace within which all resources for this
// config are created
Namespace string
}
func (config *NetworkingTestConfig) DialFromEndpointContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
config.DialFromContainer(protocol, config.EndpointPods[0].Status.PodIP, targetIP, EndpointHttpPort, targetPort, maxTries, minTries, expectedEps)
}
func (config *NetworkingTestConfig) DialFromTestContainer(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
config.DialFromContainer(protocol, config.TestContainerPod.Status.PodIP, targetIP, TestContainerHttpPort, targetPort, maxTries, minTries, expectedEps)
}
// diagnoseMissingEndpoints prints debug information about the endpoints that
// are NOT in the given list of foundEndpoints. These are the endpoints we
// expected a response from.
func (config *NetworkingTestConfig) diagnoseMissingEndpoints(foundEndpoints sets.String) {
for _, e := range config.EndpointPods {
if foundEndpoints.Has(e.Name) {
continue
}
Logf("\nOutput of kubectl describe pod %v/%v:\n", e.Namespace, e.Name)
desc, _ := RunKubectl(
"describe", "pod", e.Name, fmt.Sprintf("--namespace=%v", e.Namespace))
Logf(desc)
}
}
// EndpointHostnames returns a set of hostnames for existing endpoints.
func (config *NetworkingTestConfig) EndpointHostnames() sets.String {
expectedEps := sets.NewString()
for _, p := range config.EndpointPods {
expectedEps.Insert(p.Name)
}
return expectedEps
}
// DialFromContainer executes a curl via kubectl exec in a test container,
// which might then translate to a tcp or udp request based on the protocol
// argument in the url.
// - minTries is the minimum number of curl attempts required before declaring
// success. Set to 0 if you'd like to return as soon as all endpoints respond
// at least once.
// - maxTries is the maximum number of curl attempts. If this many attempts pass
// and we don't see all expected endpoints, the test fails.
// - expectedEps is the set of endpoint names to wait for. Typically this is also
// the hostname reported by each pod in the service through /hostName.
// maxTries == minTries will confirm that we see the expected endpoints and no
// more for maxTries. Use this if you want to eg: fail a readiness check on a
// pod and confirm it doesn't show up as an endpoint.
func (config *NetworkingTestConfig) DialFromContainer(protocol, containerIP, targetIP string, containerHttpPort, targetPort, maxTries, minTries int, expectedEps sets.String) {
cmd := fmt.Sprintf("curl -q -s 'http://%s:%d/dial?request=hostName&protocol=%s&host=%s&port=%d&tries=1'",
containerIP,
containerHttpPort,
protocol,
targetIP,
targetPort)
eps := sets.NewString()
for i := 0; i < maxTries; i++ {
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, cmd)
if err != nil {
// A failure to kubectl exec counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr %q", cmd, err, stdout, stderr)
} else {
var output map[string][]string
if err := json.Unmarshal([]byte(stdout), &output); err != nil {
Logf("WARNING: Failed to unmarshal curl response. Cmd %v run in %v, output: %s, err: %v",
cmd, config.HostTestContainerPod.Name, stdout, err)
continue
}
for _, hostName := range output["responses"] {
trimmed := strings.TrimSpace(hostName)
if trimmed != "" {
eps.Insert(trimmed)
}
}
}
Logf("Waiting for endpoints: %v", expectedEps.Difference(eps))
// Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
return
}
// TODO: get rid of this delay #36281
time.Sleep(hitEndpointRetryDelay)
}
config.diagnoseMissingEndpoints(eps)
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps)
}
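// Usage sketch (hypothetical invocation): wait until every endpoint pod has
// answered at least once, giving up after config.MaxTries attempts:
//
//   config.DialFromTestContainer("udp", config.ClusterIP, ClusterUdpPort,
//       config.MaxTries, 0, config.EndpointHostnames())
//
// Passing minTries == maxTries instead asserts that no additional endpoints
// show up during the whole window, e.g. after deliberately failing a pod's
// readiness check. The same semantics apply to DialFromNode below.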
// DialFromNode executes a tcp or udp request based on protocol via kubectl exec
// in a test container running with host networking.
// - minTries is the minimum number of curl attempts required before declaring
// success. Set to 0 if you'd like to return as soon as all endpoints respond
// at least once.
// - maxTries is the maximum number of curl attempts. If this many attempts pass
// and we don't see all expected endpoints, the test fails.
// maxTries == minTries will confirm that we see the expected endpoints and no
// more for maxTries. Use this if you want to eg: fail a readiness check on a
// pod and confirm it doesn't show up as an endpoint.
func (config *NetworkingTestConfig) DialFromNode(protocol, targetIP string, targetPort, maxTries, minTries int, expectedEps sets.String) {
var cmd string
if protocol == "udp" {
// TODO: It would be enough to pass 1s+epsilon to timeout, but unfortunately
// busybox timeout doesn't support non-integer values.
cmd = fmt.Sprintf("echo 'hostName' | timeout -t 2 nc -w 1 -u %s %d", targetIP, targetPort)
} else {
cmd = fmt.Sprintf("timeout -t 15 curl -q -s --connect-timeout 1 http://%s:%d/hostName", targetIP, targetPort)
}
// TODO: This simply tells us that we can reach the endpoints. Check that
// the probability of hitting a specific endpoint is roughly the same as
// hitting any other.
eps := sets.NewString()
filterCmd := fmt.Sprintf("%s | grep -v '^\\s*$'", cmd)
for i := 0; i < maxTries; i++ {
stdout, stderr, err := config.f.ExecShellInPodWithFullOutput(config.HostTestContainerPod.Name, filterCmd)
if err != nil || len(stderr) > 0 {
// A failure to exec command counts as a try, not a hard fail.
// Also note that we will keep failing for maxTries in tests where
// we confirm unreachability.
Logf("Failed to execute %q: %v, stdout: %q, stderr: %q", filterCmd, err, stdout, stderr)
} else {
trimmed := strings.TrimSpace(stdout)
if trimmed != "" {
eps.Insert(trimmed)
}
}
Logf("Waiting for %+v endpoints, got endpoints %+v", expectedEps.Difference(eps), eps)
// Check against i+1 so we exit if minTries == maxTries.
if (eps.Equal(expectedEps) || eps.Len() == 0 && expectedEps.Len() == 0) && i+1 >= minTries {
return
}
// TODO: get rid of this delay #36281
time.Sleep(hitEndpointRetryDelay)
}
config.diagnoseMissingEndpoints(eps)
Failf("Failed to find expected endpoints:\nTries %d\nCommand %v\nretrieved %v\nexpected %v\n", minTries, cmd, eps, expectedEps)
}
// GetSelfURL executes a curl against the given path via kubectl exec into a
// test container running with host networking, and fails if the output
// doesn't match the expected string.
func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) {
cmd := fmt.Sprintf("curl -q -s --connect-timeout 1 http://localhost:10249%s", path)
By(fmt.Sprintf("Getting kube-proxy self URL %s", path))
// These are arbitrary timeouts. The curl command should pass on first try,
// unless kubeproxy is starved/bootstrapping/restarting etc.
const retryInterval = 1 * time.Second
const retryTimeout = 30 * time.Second
podName := config.HostTestContainerPod.Name
var msg string
if pollErr := wait.PollImmediate(retryInterval, retryTimeout, func() (bool, error) {
stdout, err := RunHostCmd(config.Namespace, podName, cmd)
if err != nil {
msg = fmt.Sprintf("failed executing cmd %v in %v/%v: %v", cmd, config.Namespace, podName, err)
Logf(msg)
return false, nil
}
if !strings.Contains(stdout, expected) {
msg = fmt.Sprintf("successfully executed %v in %v/%v, but output '%v' doesn't contain expected string '%v'", cmd, config.Namespace, podName, stdout, expected)
Logf(msg)
return false, nil
}
return true, nil
}); pollErr != nil {
Logf("\nOutput of kubectl describe pod %v/%v:\n", config.Namespace, podName)
desc, _ := RunKubectl(
"describe", "pod", podName, fmt.Sprintf("--namespace=%v", config.Namespace))
Logf("%s", desc)
Failf("Timed out in %v: %v", retryTimeout, msg)
}
}
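// Usage sketch (hypothetical path and expected response):
//
//   config.GetSelfURL("/healthz", "ok")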
func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node string) *v1.Pod {
probe := &v1.Probe{
InitialDelaySeconds: 10,
TimeoutSeconds: 30,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 3,
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.IntOrString{IntVal: EndpointHttpPort},
},
},
}
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: config.Namespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "webserver",
Image: NetexecImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
},
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: EndpointHttpPort,
},
{
Name: "udp",
ContainerPort: EndpointUdpPort,
Protocol: v1.ProtocolUDP,
},
},
LivenessProbe: probe,
ReadinessProbe: probe,
},
},
NodeSelector: map[string]string{
"kubernetes.io/hostname": node,
},
},
}
return pod
}
func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
pod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: testPodName,
Namespace: config.Namespace,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "webserver",
Image: NetexecImageName,
ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
},
Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: TestContainerHttpPort,
},
},
},
},
},
}
return pod
}
func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) {
serviceSpec := &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: nodePortServiceName,
},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
{Port: ClusterHttpPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)},
{Port: ClusterUdpPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)},
},
Selector: selector,
},
}
config.NodePortService = config.createService(serviceSpec)
}
func (config *NetworkingTestConfig) DeleteNodePortService() {
err := config.getServiceClient().Delete(config.NodePortService.Name, nil)
Expect(err).NotTo(HaveOccurred(), "error while deleting NodePortService. err:%v)", err)
time.Sleep(15 * time.Second) // wait for kube-proxy to catch up with the service being deleted.
}
func (config *NetworkingTestConfig) createTestPods() {
testContainerPod := config.createTestPodSpec()
hostTestContainerPod := NewHostExecPodSpec(config.Namespace, hostTestPodName)
config.createPod(testContainerPod)
config.createPod(hostTestContainerPod)
ExpectNoError(config.f.WaitForPodRunning(testContainerPod.Name))
ExpectNoError(config.f.WaitForPodRunning(hostTestContainerPod.Name))
var err error
config.TestContainerPod, err = config.getPodClient().Get(testContainerPod.Name, metav1.GetOptions{})
if err != nil {
Failf("Failed to retrieve %s pod: %v", testContainerPod.Name, err)
}
config.HostTestContainerPod, err = config.getPodClient().Get(hostTestContainerPod.Name, metav1.GetOptions{})
if err != nil {
Failf("Failed to retrieve %s pod: %v", hostTestContainerPod.Name, err)
}
}
func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
_, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
err = WaitForService(config.f.ClientSet, config.Namespace, serviceSpec.Name, true, 5*time.Second, 45*time.Second)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("error while waiting for service:%s err: %v", serviceSpec.Name, err))
createdService, err := config.getServiceClient().Get(serviceSpec.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
return createdService
}
// setupCore sets up the pods and core test config
// mainly for simplified node e2e setup
func (config *NetworkingTestConfig) setupCore(selector map[string]string) {
By("Creating the service pods in kubernetes")
podName := "netserver"
config.EndpointPods = config.createNetProxyPods(podName, selector)
By("Creating test pods")
config.createTestPods()
epCount := len(config.EndpointPods)
config.MaxTries = epCount*epCount + testTries
}
// setup includes setupCore and also sets up services
func (config *NetworkingTestConfig) setup(selector map[string]string) {
config.setupCore(selector)
By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
if len(config.ExternalAddrs) < 2 {
// fall back to legacy IPs
config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeLegacyHostIP)
}
SkipUnlessNodeCountIsAtLeast(2)
config.Nodes = nodeList.Items
By("Creating the service on top of the pods in kubernetes")
config.createNodePortService(selector)
for _, p := range config.NodePortService.Spec.Ports {
switch p.Protocol {
case v1.ProtocolUDP:
config.NodeUdpPort = int(p.NodePort)
case v1.ProtocolTCP:
config.NodeHttpPort = int(p.NodePort)
default:
continue
}
}
config.ClusterIP = config.NodePortService.Spec.ClusterIP
config.NodeIP = config.ExternalAddrs[0]
}
func (config *NetworkingTestConfig) cleanup() {
nsClient := config.getNamespacesClient()
nsList, err := nsClient.List(v1.ListOptions{})
if err == nil {
for _, ns := range nsList.Items {
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
nsClient.Delete(ns.Name, nil)
}
}
}
}
// shuffleNodes returns a new slice containing the given nodes in random order.
func shuffleNodes(nodes []v1.Node) []v1.Node {
shuffled := make([]v1.Node, len(nodes))
perm := rand.Perm(len(nodes))
for i, j := range perm {
shuffled[j] = nodes[i]
}
return shuffled
}
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
// To make this test work reasonably fast in large clusters,
// we limit the number of NetProxyPods to no more than
// maxNetProxyPodsCount on random nodes.
nodes := shuffleNodes(nodeList.Items)
if len(nodes) > maxNetProxyPodsCount {
nodes = nodes[:maxNetProxyPodsCount]
}
// create pods, one for each node
createdPods := make([]*v1.Pod, 0, len(nodes))
for i, n := range nodes {
podName := fmt.Sprintf("%s-%d", podName, i)
pod := config.createNetShellPodSpec(podName, n.Name)
pod.ObjectMeta.Labels = selector
createdPod := config.createPod(pod)
createdPods = append(createdPods, createdPod)
}
// wait until all of them are running
runningPods := make([]*v1.Pod, 0, len(nodes))
for _, p := range createdPods {
ExpectNoError(config.f.WaitForPodReady(p.Name))
rp, err := config.getPodClient().Get(p.Name, metav1.GetOptions{})
ExpectNoError(err)
runningPods = append(runningPods, rp)
}
return runningPods
}
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
config.getPodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// wait for the pod to be deleted.
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
if err != nil {
Failf("Failed to delete %s pod: %v", pod.Name, err)
}
// wait for the endpoint to be removed.
err = WaitForServiceEndpointsNum(config.f.ClientSet, config.Namespace, nodePortServiceName, len(config.EndpointPods), time.Second, wait.ForeverTestTimeout)
if err != nil {
Failf("Failed to remove endpoint from service: %s", nodePortServiceName)
}
// wait for kube-proxy to catch up with the pod being deleted.
time.Sleep(5 * time.Second)
}
func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
return config.getPodClient().Create(pod)
}
func (config *NetworkingTestConfig) getPodClient() *PodClient {
if config.podClient == nil {
config.podClient = config.f.PodClient()
}
return config.podClient
}
func (config *NetworkingTestConfig) getServiceClient() coreclientset.ServiceInterface {
return config.f.ClientSet.Core().Services(config.Namespace)
}
func (config *NetworkingTestConfig) getNamespacesClient() coreclientset.NamespaceInterface {
return config.f.ClientSet.Core().Namespaces()
}
func CheckReachabilityFromPod(expectToBeReachable bool, namespace, pod, target string) {
cmd := fmt.Sprintf("wget -T 5 -qO- %q", target)
err := wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
_, err := RunHostCmd(namespace, pod, cmd)
if expectToBeReachable && err != nil {
Logf("Expect target to be reachable. But got err: %v. Retry until timeout", err)
return false, nil
}
if !expectToBeReachable && err == nil {
Logf("Expect target NOT to be reachable. But it is reachable. Retry until timeout")
return false, nil
}
return true, nil
})
Expect(err).NotTo(HaveOccurred())
}
// httpGetNoConnectionPool does an HTTP GET without reusing TCP connections.
// Connection reuse can mask problems where an iptables rule has changed but
// established connections keep working. Intended for relatively quick requests
// (status checks), so a short (5 second) timeout is used.
func httpGetNoConnectionPool(url string) (*http.Response, error) {
return httpGetNoConnectionPoolTimeout(url, 5*time.Second)
}
func httpGetNoConnectionPoolTimeout(url string, timeout time.Duration) (*http.Response, error) {
tr := utilnet.SetTransportDefaults(&http.Transport{
DisableKeepAlives: true,
})
client := &http.Client{
Transport: tr,
Timeout: timeout,
}
return client.Get(url)
}
func TestReachableHTTP(ip string, port int, request string, expect string) (bool, error) {
return TestReachableHTTPWithContent(ip, port, request, expect, nil)
}
func TestReachableHTTPWithContent(ip string, port int, request string, expect string, content *bytes.Buffer) (bool, error) {
return TestReachableHTTPWithContentTimeout(ip, port, request, expect, content, 5*time.Second)
}
func TestReachableHTTPWithContentTimeout(ip string, port int, request string, expect string, content *bytes.Buffer, timeout time.Duration) (bool, error) {
url := fmt.Sprintf("http://%s:%d%s", ip, port, request)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", url)
return false, nil
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", url)
return false, nil
}
Logf("Testing HTTP reachability of %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, timeout)
if err != nil {
Logf("Got error testing for reachability of %s: %v", url, err)
return false, nil
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
Logf("Got error reading response from %s: %v", url, err)
return false, nil
}
if resp.StatusCode != 200 {
return false, fmt.Errorf("received non-success return status %q trying to access %s; got body: %s",
resp.Status, url, string(body))
}
if !strings.Contains(string(body), expect) {
return false, fmt.Errorf("received response body without expected substring %q: %s", expect, string(body))
}
if content != nil {
content.Write(body)
}
return true, nil
}
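// These reachability helpers return (bool, error) so they can serve directly as
// wait.Poll conditions. A usage sketch (ip and port are hypothetical):
//
//   err := wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
//       return TestReachableHTTP(ip, 80, "/hostName", "")
//   })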
func TestNotReachableHTTP(ip string, port int) (bool, error) {
return TestNotReachableHTTPTimeout(ip, port, 5*time.Second)
}
func TestNotReachableHTTPTimeout(ip string, port int, timeout time.Duration) (bool, error) {
url := fmt.Sprintf("http://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for non-reachability check (%s)", url)
return false, nil
}
if port == 0 {
Failf("Got port==0 for non-reachability check (%s)", url)
return false, nil
}
Logf("Testing HTTP non-reachability of %v", url)
resp, err := httpGetNoConnectionPoolTimeout(url, timeout)
if err != nil {
Logf("Confirmed that %s is not reachable", url)
return true, nil
}
resp.Body.Close()
return false, nil
}
func TestReachableUDP(ip string, port int, request string, expect string) (bool, error) {
uri := fmt.Sprintf("udp://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", uri)
return false, nil
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", uri)
return false, nil
}
Logf("Testing UDP reachability of %v", uri)
con, err := net.Dial("udp", ip+":"+strconv.Itoa(port))
if err != nil {
return false, fmt.Errorf("Failed to dial %s:%d: %v", ip, port, err)
}
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
if err != nil {
return false, fmt.Errorf("Failed to send request: %v", err)
}
buf := make([]byte, len(expect)+1)
err = con.SetDeadline(time.Now().Add(3 * time.Second))
if err != nil {
return false, fmt.Errorf("Failed to set deadline: %v", err)
}
_, err = con.Read(buf)
if err != nil {
return false, nil
}
if !strings.Contains(string(buf), expect) {
return false, fmt.Errorf("Failed to retrieve %q, got %q", expect, string(buf))
}
Logf("Successfully reached %v", uri)
return true, nil
}
func TestNotReachableUDP(ip string, port int, request string) (bool, error) {
uri := fmt.Sprintf("udp://%s:%d", ip, port)
if ip == "" {
Failf("Got empty IP for reachability check (%s)", uri)
return false, nil
}
if port == 0 {
Failf("Got port==0 for reachability check (%s)", uri)
return false, nil
}
Logf("Testing UDP non-reachability of %v", uri)
con, err := net.Dial("udp", ip+":"+strconv.Itoa(port))
if err != nil {
Logf("Confirmed that %s is not reachable", uri)
return true, nil
}
_, err = con.Write([]byte(fmt.Sprintf("%s\n", request)))
if err != nil {
Logf("Confirmed that %s is not reachable", uri)
return true, nil
}
buf := make([]byte, 1)
err = con.SetDeadline(time.Now().Add(3 * time.Second))
if err != nil {
return false, fmt.Errorf("Failed to set deadline: %v", err)
}
_, err = con.Read(buf)
if err != nil {
Logf("Confirmed that %s is not reachable", uri)
return true, nil
}
return false, nil
}
func TestHitNodesFromOutside(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String) error {
return TestHitNodesFromOutsideWithCount(externalIP, httpPort, timeout, expectedHosts, 1)
}
func TestHitNodesFromOutsideWithCount(externalIP string, httpPort int32, timeout time.Duration, expectedHosts sets.String,
countToSucceed int) error {
Logf("Waiting up to %v for satisfying expectedHosts for %v times", timeout, countToSucceed)
hittedHosts := sets.NewString()
count := 0
condition := func() (bool, error) {
var respBody bytes.Buffer
reached, err := TestReachableHTTPWithContentTimeout(externalIP, int(httpPort), "/hostname", "", &respBody,
1*time.Second)
if err != nil || !reached {
return false, nil
}
hittedHost := strings.TrimSpace(respBody.String())
if !expectedHosts.Has(hittedHost) {
Logf("Error hitting unexpected host: %v, reset counter: %v", hittedHost, count)
count = 0
return false, nil
}
if !hittedHosts.Has(hittedHost) {
hittedHosts.Insert(hittedHost)
Logf("Missing %+v, got %+v", expectedHosts.Difference(hittedHosts), hittedHosts)
}
if hittedHosts.Equal(expectedHosts) {
count++
if count >= countToSucceed {
return true, nil
}
}
return false, nil
}
if err := wait.Poll(time.Second, timeout, condition); err != nil {
return fmt.Errorf("error waiting for expectedHosts: %v, hittedHosts: %v, count: %v, expected count: %v",
expectedHosts, hittedHosts, count, countToSucceed)
}
return nil
}
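// Usage sketch (hypothetical values): poll until both nodes have been hit, and
// the full set is then confirmed three times:
//
//   err := TestHitNodesFromOutsideWithCount(externalIP, 80, 5*time.Minute,
//       sets.NewString("node-a", "node-b"), 3)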

View file

@ -0,0 +1,237 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"os"
"path"
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/fields"
)
// The following upgrade functions are passed into the framework below and used
// to do the actual upgrades.
var MasterUpgrade = func(v string) error {
switch TestContext.Provider {
case "gce":
return masterUpgradeGCE(v)
case "gke":
return masterUpgradeGKE(v)
default:
return fmt.Errorf("MasterUpgrade() is not implemented for provider %s", TestContext.Provider)
}
}
func masterUpgradeGCE(rawV string) error {
v := "v" + rawV
_, _, err := RunCmd(path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-M", v)
return err
}
func masterUpgradeGKE(v string) error {
Logf("Upgrading master to %q", v)
_, _, err := RunCmd("gcloud", "container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
"upgrade",
TestContext.CloudConfig.Cluster,
"--master",
fmt.Sprintf("--cluster-version=%s", v),
"--quiet")
return err
}
var NodeUpgrade = func(f *Framework, v string, img string) error {
// Perform the upgrade.
var err error
switch TestContext.Provider {
case "gce":
err = nodeUpgradeGCE(v, img)
case "gke":
err = nodeUpgradeGKE(v, img)
default:
err = fmt.Errorf("NodeUpgrade() is not implemented for provider %s", TestContext.Provider)
}
if err != nil {
return err
}
// Wait for it to complete and validate nodes are healthy.
//
// TODO(ihmccreery) We shouldn't have to wait for nodes to be ready in
// GKE; the operation shouldn't return until they all are.
Logf("Waiting up to %v for all nodes to be ready after the upgrade", RestartNodeReadyAgainTimeout)
if _, err := CheckNodesReady(f.ClientSet, RestartNodeReadyAgainTimeout, TestContext.CloudConfig.NumNodes); err != nil {
return err
}
return nil
}
func nodeUpgradeGCE(rawV, img string) error {
v := "v" + rawV
if img != "" {
env := append(os.Environ(), "KUBE_NODE_OS_DISTRIBUTION="+img)
_, _, err := RunCmdEnv(env, path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-N", "-o", v)
return err
}
_, _, err := RunCmd(path.Join(TestContext.RepoRoot, "cluster/gce/upgrade.sh"), "-N", v)
return err
}
func cleanupNodeUpgradeGCE(tmplBefore string) {
Logf("Cleaning up any unused node templates")
tmplAfter, err := MigTemplate()
if err != nil {
Logf("Could not get node template post-upgrade; may have leaked template %s", tmplBefore)
return
}
if tmplBefore == tmplAfter {
// The node upgrade failed so there's no need to delete
// anything.
Logf("Node template %s is still in use; not cleaning up", tmplBefore)
return
}
Logf("Deleting node template %s", tmplBefore)
if _, _, err := retryCmd("gcloud", "compute", "instance-templates",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
"delete",
tmplBefore); err != nil {
Logf("gcloud compute instance-templates delete %s call failed with err: %v", tmplBefore, err)
Logf("May have leaked instance template %q", tmplBefore)
}
}
func nodeUpgradeGKE(v string, img string) error {
Logf("Upgrading nodes to version %q and image %q", v, img)
args := []string{
"container",
"clusters",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
"upgrade",
TestContext.CloudConfig.Cluster,
fmt.Sprintf("--cluster-version=%s", v),
"--quiet",
}
if len(img) > 0 {
args = append(args, fmt.Sprintf("--image-type=%s", img))
}
_, _, err := RunCmd("gcloud", args...)
return err
}
// CheckNodesReady waits up to nt for expect nodes accessed via client c to be
// ready, returning an error if that doesn't happen in time. It returns the
// names of the nodes it finds in any case.
func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
// First, keep getting all of the nodes until we get the number we expect.
var nodeList *v1.NodeList
var errLast error
start := time.Now()
found := wait.Poll(Poll, nt, func() (bool, error) {
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
// knows about all of the nodes. Thus, we retry the list nodes call
// until we get the expected number of nodes.
nodeList, errLast = c.Core().Nodes().List(v1.ListOptions{
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
if errLast != nil {
return false, nil
}
if len(nodeList.Items) != expect {
errLast = fmt.Errorf("expected to find %d nodes but found only %d (%v elapsed)",
expect, len(nodeList.Items), time.Since(start))
Logf("%v", errLast)
return false, nil
}
return true, nil
}) == nil
nodeNames := make([]string, len(nodeList.Items))
for i, n := range nodeList.Items {
nodeNames[i] = n.ObjectMeta.Name
}
if !found {
return nodeNames, fmt.Errorf("couldn't find %d nodes within %v; last error: %v",
expect, nt, errLast)
}
Logf("Successfully found %d nodes", expect)
// Next, ensure in parallel that all the nodes are ready. We subtract the
// time we spent waiting above.
timeout := nt - time.Since(start)
result := make(chan bool, len(nodeList.Items))
for _, n := range nodeNames {
n := n
go func() { result <- WaitForNodeToBeReady(c, n, timeout) }()
}
failed := false
for range nodeList.Items {
if !<-result {
failed = true
}
}
if failed {
return nodeNames, fmt.Errorf("at least one node failed to be ready")
}
return nodeNames, nil
}
// MigTemplate (GCE-only) returns the name of the MIG template that the
// nodes of the cluster use.
func MigTemplate() (string, error) {
var errLast error
var templ string
key := "instanceTemplate"
if wait.Poll(Poll, SingleCallTimeout, func() (bool, error) {
// TODO(mikedanese): make this hit the compute API directly instead of
// shelling out to gcloud.
// An `instance-groups managed describe` call outputs what we want to stdout.
output, _, err := retryCmd("gcloud", "compute", "instance-groups", "managed",
fmt.Sprintf("--project=%s", TestContext.CloudConfig.ProjectID),
"describe",
fmt.Sprintf("--zone=%s", TestContext.CloudConfig.Zone),
TestContext.CloudConfig.NodeInstanceGroup)
if err != nil {
errLast = fmt.Errorf("gcloud compute instance-groups managed describe call failed with err: %v", err)
return false, nil
}
// The 'describe' call probably succeeded; parse the output and try to
// find the line that looks like "instanceTemplate: url/to/<templ>" and
// return <templ>.
if val := ParseKVLines(output, key); len(val) > 0 {
url := strings.Split(val, "/")
templ = url[len(url)-1]
Logf("MIG group %s using template: %s", TestContext.CloudConfig.NodeInstanceGroup, templ)
return true, nil
}
errLast = fmt.Errorf("couldn't find %s in output to get MIG template. Output: %s", key, output)
return false, nil
}) != nil {
return "", fmt.Errorf("MigTemplate() failed with last error: %v", errLast)
}
return templ, nil
}

View file

@ -0,0 +1,135 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"k8s.io/kubernetes/test/e2e/perftype"
)
// TODO(random-liu): Change the tests to actually use PerfData from the beginning instead of
// translating one to the other here.
// currentApiCallMetricsVersion is the current apicall performance metrics version. We should
// bump up the version each time we make an incompatible change to the metrics.
const currentApiCallMetricsVersion = "v1"
// ApiCallToPerfData transforms APIResponsiveness to PerfData.
func ApiCallToPerfData(apicalls APIResponsiveness) *perftype.PerfData {
perfData := &perftype.PerfData{Version: currentApiCallMetricsVersion}
for _, apicall := range apicalls.APICalls {
item := perftype.DataItem{
Data: map[string]float64{
"Perc50": float64(apicall.Latency.Perc50) / 1000000, // us -> ms
"Perc90": float64(apicall.Latency.Perc90) / 1000000,
"Perc99": float64(apicall.Latency.Perc99) / 1000000,
},
Unit: "ms",
Labels: map[string]string{
"Verb": apicall.Verb,
"Resource": apicall.Resource,
},
}
perfData.DataItems = append(perfData.DataItems, item)
}
return perfData
}
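// Sketch of the transformation above (hypothetical numbers): a LIST/pods entry
// whose Perc99 is 350ms becomes
//
//   perftype.DataItem{
//       Data:   map[string]float64{"Perc50": ..., "Perc90": ..., "Perc99": 350},
//       Unit:   "ms",
//       Labels: map[string]string{"Verb": "LIST", "Resource": "pods"},
//   }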
// currentKubeletPerfMetricsVersion is the current kubelet performance metrics version. We should
// bump up the version each time we make an incompatible change to the metrics.
const currentKubeletPerfMetricsVersion = "v2"
// ResourceUsageToPerfData transforms ResourceUsagePerNode to PerfData. Notice that this function
// only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
func ResourceUsageToPerfData(usagePerNode ResourceUsagePerNode) *perftype.PerfData {
return ResourceUsageToPerfDataWithLabels(usagePerNode, nil)
}
// CPUUsageToPerfData transforms NodesCPUSummary to PerfData.
func CPUUsageToPerfData(usagePerNode NodesCPUSummary) *perftype.PerfData {
return CPUUsageToPerfDataWithLabels(usagePerNode, nil)
}
// PrintPerfData prints the perfdata in json format with PerfResultTag prefix.
// If an error occurs, nothing will be printed.
func PrintPerfData(p *perftype.PerfData) {
// Notice that we must make sure perftype.PerfResultEnd is on a new line.
if str := PrettyPrintJSON(p); str != "" {
Logf("%s %s\n%s", perftype.PerfResultTag, str, perftype.PerfResultEnd)
}
}
// ResourceUsageToPerfDataWithLabels transforms ResourceUsagePerNode to PerfData with additional labels.
// Notice that this function only cares about memory usage, because cpu usage information will be extracted from NodesCPUSummary.
func ResourceUsageToPerfDataWithLabels(usagePerNode ResourceUsagePerNode, labels map[string]string) *perftype.PerfData {
items := []perftype.DataItem{}
for node, usages := range usagePerNode {
for c, usage := range usages {
item := perftype.DataItem{
Data: map[string]float64{
"memory": float64(usage.MemoryUsageInBytes) / (1024 * 1024),
"workingset": float64(usage.MemoryWorkingSetInBytes) / (1024 * 1024),
"rss": float64(usage.MemoryRSSInBytes) / (1024 * 1024),
},
Unit: "MB",
Labels: map[string]string{
"node": node,
"container": c,
"datatype": "resource",
"resource": "memory",
},
}
items = append(items, item)
}
}
return &perftype.PerfData{
Version: currentKubeletPerfMetricsVersion,
DataItems: items,
Labels: labels,
}
}
// CPUUsageToPerfDataWithLabels transforms NodesCPUSummary to PerfData with additional labels.
func CPUUsageToPerfDataWithLabels(usagePerNode NodesCPUSummary, labels map[string]string) *perftype.PerfData {
items := []perftype.DataItem{}
for node, usages := range usagePerNode {
for c, usage := range usages {
data := map[string]float64{}
for perc, value := range usage {
data[fmt.Sprintf("Perc%02.0f", perc*100)] = value * 1000
}
item := perftype.DataItem{
Data: data,
Unit: "mCPU",
Labels: map[string]string{
"node": node,
"container": c,
"datatype": "resource",
"resource": "cpu",
},
}
items = append(items, item)
}
}
return &perftype.PerfData{
Version: currentKubeletPerfMetricsVersion,
DataItems: items,
Labels: labels,
}
}
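// Sketch of the key/unit conversion above (hypothetical sample): a container
// using 0.05 cores at the 90th percentile yields
//
//   data["Perc90"] = 50 // 0.05 cores * 1000 = 50 mCPU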

194
vendor/k8s.io/kubernetes/test/e2e/framework/pods.go generated vendored Normal file
View file

@ -0,0 +1,194 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"fmt"
"regexp"
"sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
// ImageWhiteList is the set of images used in the current test suite. It should be initialized
// by the test suite, and the images in the white list should be pre-pulled before the tests
// run. Currently this is only used by the node e2e test.
var ImageWhiteList sets.String
// PodClient is a convenience method for getting a pod client interface in the framework's
// namespace, possibly applying test-suite specific transformations to the pod spec, e.g. for
// node e2e pod scheduling.
func (f *Framework) PodClient() *PodClient {
return &PodClient{
f: f,
PodInterface: f.ClientSet.Core().Pods(f.Namespace.Name),
}
}
type PodClient struct {
f *Framework
v1core.PodInterface
}
// Create creates a new pod according to the framework specifications (it does not wait for the pod to start).
func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(pod)
ExpectNoError(err, "Error creating Pod")
return p
}
// CreateSync creates a new pod according to the framework specifications, and waits for it to start.
func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
p := c.Create(pod)
ExpectNoError(c.f.WaitForPodRunning(p.Name))
// Get the newest pod after it becomes running; some status fields (such as the pod IP) may change after creation.
p, err := c.Get(p.Name, metav1.GetOptions{})
ExpectNoError(err)
return p
}
// CreateBatch creates a batch of pods. All pods are created before waiting.
func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
ps := make([]*v1.Pod, len(pods))
var wg sync.WaitGroup
for i, pod := range pods {
wg.Add(1)
go func(i int, pod *v1.Pod) {
defer wg.Done()
defer GinkgoRecover()
ps[i] = c.CreateSync(pod)
}(i, pod)
}
wg.Wait()
return ps
}
// Update updates the pod object. It retries on conflict and fails the test on
// any other error. name is the pod name; updateFn is the function that mutates
// the pod object in place.
func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("failed to get pod %q: %v", name, err)
}
updateFn(pod)
_, err = c.PodInterface.Update(pod)
if err == nil {
Logf("Successfully updated pod %q", name)
return true, nil
}
if errors.IsConflict(err) {
Logf("Conflicting update to pod %q, re-get and re-update: %v", name, err)
return false, nil
}
return false, fmt.Errorf("failed to update pod %q: %v", name, err)
}))
}
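// Usage sketch (hypothetical pod and label): the conflict-retry loop above
// makes this safe against concurrent writers.
//
//   c.Update("test-pod", func(pod *v1.Pod) {
//       if pod.Labels == nil {
//           pod.Labels = map[string]string{}
//       }
//       pod.Labels["updated"] = "true"
//   })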
// DeleteSync deletes the pod and waits for it to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, the test fails.
func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout time.Duration) {
err := c.Delete(name, options)
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
}
Expect(WaitForPodToDisappear(c.f.ClientSet, c.f.Namespace.Name, name, labels.Everything(),
2*time.Second, timeout)).To(Succeed(), "wait for pod %q to disappear", name)
}
// mungeSpec applies test-suite-specific transformations to the pod spec.
func (c *PodClient) mungeSpec(pod *v1.Pod) {
if !TestContext.NodeE2E {
return
}
Expect(pod.Spec.NodeName).To(Or(BeZero(), Equal(TestContext.NodeName)), "Test misconfigured")
pod.Spec.NodeName = TestContext.NodeName
// Node e2e does not support the default DNSClusterFirst policy. Set
// the policy to DNSDefault, which is configured per node.
pod.Spec.DNSPolicy = v1.DNSDefault
// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
// we should not munge ImagePullPolicy for cluster e2e pods.
if !TestContext.PrepullImages {
return
}
// If prepull is enabled, munge the container spec to make sure the images are not pulled
// during the test.
for i := range pod.Spec.Containers {
c := &pod.Spec.Containers[i]
if c.ImagePullPolicy == v1.PullAlways {
// If the image pull policy is PullAlways, the image doesn't need to be in
// the white list or pre-pulled, because the image is expected to be pulled
// in the test anyway.
continue
}
// If the image policy is not PullAlways, the image must be in the white list and
// pre-pulled.
Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in white list should have
// been prepulled.
c.ImagePullPolicy = v1.PullNever
}
}
// TODO(random-liu): Move pod wait function into this file
// WaitForSuccess waits for the pod to succeed.
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
Expect(WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
case v1.PodSucceeded:
return true, nil
default:
return false, nil
}
},
)).To(Succeed(), "wait for pod %q to success", name)
}
// MatchContainerOutput gets the output of a container and matches it against the expected regexp.
func (c *PodClient) MatchContainerOutput(name string, containerName string, expectedRegexp string) error {
f := c.f
output, err := GetPodLogs(f.ClientSet, f.Namespace.Name, name, containerName)
if err != nil {
return fmt.Errorf("failed to get output for container %q of pod %q", containerName, name)
}
regex, err := regexp.Compile(expectedRegexp)
if err != nil {
return fmt.Errorf("failed to compile regexp %q: %v", expectedRegexp, err)
}
if !regex.MatchString(output) {
return fmt.Errorf("failed to match regexp %q in output %q", expectedRegexp, output)
}
return nil
}
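// Usage sketch (hypothetical pod/container names and pattern):
//
//   err := c.MatchContainerOutput("test-pod", "webserver", `started at \d+`)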

View file

@ -0,0 +1,362 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"bytes"
"fmt"
"math"
"sort"
"strconv"
"strings"
"sync"
"text/tabwriter"
"time"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/util/system"
)
const (
resourceDataGatheringPeriod = 60 * time.Second
probeDuration = 15 * time.Second
)
type ResourceConstraint struct {
CPUConstraint float64
MemoryConstraint uint64
}
type SingleContainerSummary struct {
Name string
Cpu float64
Mem uint64
}
// we can't have int here, as JSON does not accept integer keys.
type ResourceUsageSummary map[string][]SingleContainerSummary
func (s *ResourceUsageSummary) PrintHumanReadable() string {
buf := &bytes.Buffer{}
w := tabwriter.NewWriter(buf, 1, 0, 1, ' ', 0)
for perc, summaries := range *s {
buf.WriteString(fmt.Sprintf("%v percentile:\n", perc))
fmt.Fprintf(w, "container\tcpu(cores)\tmemory(MB)\n")
for _, summary := range summaries {
fmt.Fprintf(w, "%q\t%.3f\t%.2f\n", summary.Name, summary.Cpu, float64(summary.Mem)/(1024*1024))
}
w.Flush()
}
return buf.String()
}
func (s *ResourceUsageSummary) PrintJSON() string {
return PrettyPrintJSON(*s)
}
func computePercentiles(timeSeries []ResourceUsagePerContainer, percentilesToCompute []int) map[int]ResourceUsagePerContainer {
if len(timeSeries) == 0 {
return make(map[int]ResourceUsagePerContainer)
}
dataMap := make(map[string]*usageDataPerContainer)
for i := range timeSeries {
for name, data := range timeSeries[i] {
if dataMap[name] == nil {
dataMap[name] = &usageDataPerContainer{
// Preallocate capacity only; appending to full-length slices would leave
// zero-valued samples that skew the percentiles computed below.
cpuData: make([]float64, 0, len(timeSeries)),
memUseData: make([]uint64, 0, len(timeSeries)),
memWorkSetData: make([]uint64, 0, len(timeSeries)),
}
}
dataMap[name].cpuData = append(dataMap[name].cpuData, data.CPUUsageInCores)
dataMap[name].memUseData = append(dataMap[name].memUseData, data.MemoryUsageInBytes)
dataMap[name].memWorkSetData = append(dataMap[name].memWorkSetData, data.MemoryWorkingSetInBytes)
}
}
for _, v := range dataMap {
sort.Float64s(v.cpuData)
sort.Sort(uint64arr(v.memUseData))
sort.Sort(uint64arr(v.memWorkSetData))
}
result := make(map[int]ResourceUsagePerContainer)
for _, perc := range percentilesToCompute {
data := make(ResourceUsagePerContainer)
for k, v := range dataMap {
percentileIndex := int(math.Ceil(float64(len(v.cpuData)*perc)/100)) - 1
data[k] = &ContainerResourceUsage{
Name: k,
CPUUsageInCores: v.cpuData[percentileIndex],
MemoryUsageInBytes: v.memUseData[percentileIndex],
MemoryWorkingSetInBytes: v.memWorkSetData[percentileIndex],
}
}
result[perc] = data
}
return result
}
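// Sketch of the index math above (hypothetical numbers): with 10 sorted samples
// and perc = 90, percentileIndex = ceil(10*90/100) - 1 = 8, i.e. the 9th sample.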
func leftMergeData(left, right map[int]ResourceUsagePerContainer) map[int]ResourceUsagePerContainer {
result := make(map[int]ResourceUsagePerContainer)
for percentile, data := range left {
result[percentile] = data
if _, ok := right[percentile]; !ok {
continue
}
for k, v := range right[percentile] {
result[percentile][k] = v
}
}
return result
}
type resourceGatherWorker struct {
c clientset.Interface
nodeName string
wg *sync.WaitGroup
containerIDToNameMap map[string]string
containerIDs []string
stopCh chan struct{}
dataSeries []ResourceUsagePerContainer
finished bool
inKubemark bool
}
func (w *resourceGatherWorker) singleProbe() {
data := make(ResourceUsagePerContainer)
if w.inKubemark {
kubemarkData := GetKubemarkMasterComponentsResourceUsage()
if kubemarkData == nil {
return
}
for k, v := range kubemarkData {
data[k] = &ContainerResourceUsage{
Name: v.Name,
MemoryWorkingSetInBytes: v.MemoryWorkingSetInBytes,
CPUUsageInCores: v.CPUUsageInCores,
}
}
} else {
nodeUsage, err := getOneTimeResourceUsageOnNode(w.c, w.nodeName, probeDuration, func() []string { return w.containerIDs }, true)
if err != nil {
Logf("Error while reading data from %v: %v", w.nodeName, err)
return
}
for k, v := range nodeUsage {
data[w.containerIDToNameMap[k]] = v
}
}
w.dataSeries = append(w.dataSeries, data)
}
func (w *resourceGatherWorker) gather(initialSleep time.Duration) {
defer utilruntime.HandleCrash()
defer w.wg.Done()
defer Logf("Closing worker for %v", w.nodeName)
defer func() { w.finished = true }()
select {
case <-time.After(initialSleep):
w.singleProbe()
for {
select {
case <-time.After(resourceDataGatheringPeriod):
w.singleProbe()
case <-w.stopCh:
return
}
}
case <-w.stopCh:
return
}
}
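// getKubeSystemContainersResourceUsage staggers the workers so that their
// probes don't all hit the apiserver at once: with, say, 20 workers and the
// 60-second resourceDataGatheringPeriod, each worker starts 3 seconds after
// the previous one.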
func (g *containerResourceGatherer) getKubeSystemContainersResourceUsage(c clientset.Interface) {
if len(g.workers) == 0 {
return
}
delayPeriod := resourceDataGatheringPeriod / time.Duration(len(g.workers))
delay := time.Duration(0)
for i := range g.workers {
go g.workers[i].gather(delay)
delay += delayPeriod
}
g.workerWg.Wait()
}
type containerResourceGatherer struct {
client clientset.Interface
stopCh chan struct{}
workers []resourceGatherWorker
workerWg sync.WaitGroup
containerIDToNameMap map[string]string
containerIDs []string
options ResourceGathererOptions
}
type ResourceGathererOptions struct {
inKubemark bool
masterOnly bool
}
func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOptions) (*containerResourceGatherer, error) {
g := containerResourceGatherer{
client: c,
stopCh: make(chan struct{}),
containerIDToNameMap: make(map[string]string),
containerIDs: make([]string, 0),
options: options,
}
if options.inKubemark {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
inKubemark: true,
stopCh: g.stopCh,
wg: &g.workerWg,
finished: false,
})
} else {
pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
return nil, err
}
for _, pod := range pods.Items {
for _, container := range pod.Status.ContainerStatuses {
containerID := strings.TrimPrefix(container.ContainerID, "docker:/")
g.containerIDToNameMap[containerID] = pod.Name + "/" + container.Name
g.containerIDs = append(g.containerIDs, containerID)
}
}
nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
return nil, err
}
for _, node := range nodeList.Items {
if !options.masterOnly || system.IsMasterNode(node.Name) {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
c: c,
nodeName: node.Name,
wg: &g.workerWg,
containerIDToNameMap: g.containerIDToNameMap,
containerIDs: g.containerIDs,
stopCh: g.stopCh,
finished: false,
inKubemark: false,
})
if options.masterOnly {
break
}
}
}
}
return &g, nil
}
// startGatheringData blocks until stopAndSummarize is called.
func (g *containerResourceGatherer) startGatheringData() {
g.getKubeSystemContainersResourceUsage(g.client)
}
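// A minimal usage sketch for the gatherer (the constraint values below are
// made-up examples, not recommended limits):
//
// gatherer, err := NewResourceUsageGatherer(c, ResourceGathererOptions{masterOnly: true})
// ExpectNoError(err)
// go gatherer.startGatheringData()
// // ... run the test ...
// summary, err := gatherer.stopAndSummarize([]int{50, 90, 99},
// map[string]ResourceConstraint{"kube-apiserver": {CPUConstraint: 1.0, MemoryConstraint: 2 << 30}})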
func (g *containerResourceGatherer) stopAndSummarize(percentiles []int, constraints map[string]ResourceConstraint) (*ResourceUsageSummary, error) {
close(g.stopCh)
Logf("Closed stop channel. Waiting for %v workers", len(g.workers))
finished := make(chan struct{})
go func() {
g.workerWg.Wait()
finished <- struct{}{}
}()
select {
case <-finished:
Logf("Waitgroup finished.")
case <-time.After(2 * time.Minute):
unfinished := make([]string, 0)
for i := range g.workers {
if !g.workers[i].finished {
unfinished = append(unfinished, g.workers[i].nodeName)
}
}
Logf("Timed out while waiting for waitgroup, some workers failed to finish: %v", unfinished)
}
if len(percentiles) == 0 {
Logf("Warning! Empty percentile list for stopAndPrintData.")
return &ResourceUsageSummary{}, fmt.Errorf("Failed to get any resource usage data")
}
// The workers have been stopped; merge the data each one collected.
data := make(map[int]ResourceUsagePerContainer)
for i := range g.workers {
if g.workers[i].finished {
stats := computePercentiles(g.workers[i].dataSeries, percentiles)
data = leftMergeData(stats, data)
}
}
sortedKeys := []string{}
for name := range data[percentiles[0]] {
sortedKeys = append(sortedKeys, name)
}
sort.Strings(sortedKeys)
violatedConstraints := make([]string, 0)
summary := make(ResourceUsageSummary)
for _, perc := range percentiles {
for _, name := range sortedKeys {
usage := data[perc][name]
summary[strconv.Itoa(perc)] = append(summary[strconv.Itoa(perc)], SingleContainerSummary{
Name: name,
Cpu: usage.CPUUsageInCores,
Mem: usage.MemoryWorkingSetInBytes,
})
// Verifying 99th percentile of resource usage
if perc == 99 {
// Name has a form: <pod_name>/<container_name>
containerName := strings.Split(name, "/")[1]
if constraint, ok := constraints[containerName]; ok {
if usage.CPUUsageInCores > constraint.CPUConstraint {
violatedConstraints = append(
violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v CPU",
name,
usage.CPUUsageInCores,
constraint.CPUConstraint,
),
)
}
if usage.MemoryWorkingSetInBytes > constraint.MemoryConstraint {
violatedConstraints = append(
violatedConstraints,
fmt.Sprintf("Container %v is using %v/%v MB of memory",
name,
float64(usage.MemoryWorkingSetInBytes)/(1024*1024),
float64(constraint.MemoryConstraint)/(1024*1024),
),
)
}
}
}
}
}
if len(violatedConstraints) > 0 {
return &summary, fmt.Errorf("%s", strings.Join(violatedConstraints, "\n"))
}
return &summary, nil
}

File diff suppressed because it is too large

View file

@ -0,0 +1,243 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package framework
import (
"flag"
"os"
"time"
"github.com/onsi/ginkgo/config"
"github.com/spf13/viper"
"k8s.io/kubernetes/pkg/apis/componentconfig"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
"k8s.io/kubernetes/pkg/cloudprovider"
)
type TestContextType struct {
KubeConfig string
KubeContext string
KubeAPIContentType string
KubeVolumeDir string
CertDir string
Host string
// TODO: Deprecating this over time... instead just use gobindata_util.go, see #23987.
RepoRoot string
Provider string
CloudConfig CloudConfig
KubectlPath string
OutputDir string
ReportDir string
ReportPrefix string
Prefix string
MinStartupPods int
// Timeout for waiting for system pods to be running
SystemPodsStartupTimeout time.Duration
UpgradeTarget string
UpgradeImage string
PrometheusPushGateway string
ContainerRuntime string
MasterOSDistro string
NodeOSDistro string
VerifyServiceAccount bool
DeleteNamespace bool
DeleteNamespaceOnFailure bool
AllowedNotReadyNodes int
CleanStart bool
// If set to 'true' or 'all' framework will start a goroutine monitoring resource usage of system add-ons.
// It will periodically read the data from all Nodes and print a summary during afterEach. If set to 'master'
// only the master Node will be monitored.
GatherKubeSystemResourceUsageData string
GatherLogsSizes bool
GatherMetricsAfterTest bool
// Currently supported values are 'hr' for human-readable and 'json'. It's a comma separated list.
OutputPrintType string
// NodeSchedulableTimeout is the timeout for waiting for all nodes to be schedulable.
NodeSchedulableTimeout time.Duration
// CreateTestingNS is responsible for creating namespace used for executing e2e tests.
// It accepts namespace base name, which will be prepended with e2e prefix, kube client
// and labels to be applied to a namespace.
CreateTestingNS CreateTestingNSFn
// If set to true test will dump data about the namespace in which test was running.
DumpLogsOnFailure bool
// If the garbage collector is enabled in the kube-apiserver and kube-controller-manager.
GarbageCollectorEnabled bool
// FeatureGates is a set of key=value pairs that describe feature gates for alpha/experimental features.
FeatureGates string
// Node e2e specific test context
NodeTestContextType
// Viper-only parameters. These will in time replace all flags.
// Example: Create a file 'e2e.json' with the following:
// "Cadvisor":{
// "MaxRetries":"6"
// }
Viper string
Cadvisor struct {
MaxRetries int
SleepDurationMS int
}
LoggingSoak struct {
Scale int
MilliSecondsBetweenWaves int
}
}
// NodeTestContextType is part of TestContextType, it is shared by all node e2e test.
type NodeTestContextType struct {
// NodeE2E indicates whether it is running node e2e.
NodeE2E bool
// Name of the node to run tests on.
NodeName string
// NodeConformance indicates whether the test is running in node conformance mode.
NodeConformance bool
// PrepullImages indicates whether node e2e framework should prepull images.
PrepullImages bool
// KubeletConfig is the kubelet configuration the test is running against.
KubeletConfig componentconfig.KubeletConfiguration
}
type CloudConfig struct {
ProjectID string
Zone string
Cluster string
MasterName string
NodeInstanceGroup string
NumNodes int
ClusterTag string
Network string
Provider cloudprovider.Interface
}
var TestContext TestContextType
var federatedKubeContext string
// Register flags common to all e2e test suites.
func RegisterCommonFlags() {
// Turn on verbose by default to get spec names
config.DefaultReporterConfig.Verbose = true
// Turn on EmitSpecProgress to get spec progress (especially on interrupt)
config.GinkgoConfig.EmitSpecProgress = true
// Randomize specs as well as suites
config.GinkgoConfig.RandomizeAllSpecs = true
flag.StringVar(&TestContext.GatherKubeSystemResourceUsageData, "gather-resource-usage", "false", "If set to 'true' or 'all' framework will be monitoring resource usage of all system add-ons in (some) e2e tests, if set to 'master' framework will be monitoring master node only, if set to 'none' or 'false' monitoring will be turned off.")
flag.BoolVar(&TestContext.GatherLogsSizes, "gather-logs-sizes", false, "If set to true framework will be monitoring log sizes on all machines running e2e tests.")
flag.BoolVar(&TestContext.GatherMetricsAfterTest, "gather-metrics-at-teardown", false, "If set to true framework will gather metrics from all components after each test.")
flag.StringVar(&TestContext.OutputPrintType, "output-print-type", "hr", "Comma-separated list: 'hr' for human-readable summaries, 'json' for JSON ones.")
flag.BoolVar(&TestContext.DumpLogsOnFailure, "dump-logs-on-failure", true, "If set to true test will dump data about the namespace in which test was running.")
flag.BoolVar(&TestContext.DeleteNamespace, "delete-namespace", true, "If true tests will delete namespace after completion. Setting it to false is only designed to make debugging easier, DO NOT turn it off by default.")
flag.BoolVar(&TestContext.DeleteNamespaceOnFailure, "delete-namespace-on-failure", true, "If true, framework will delete test namespace on failure. Used only during test debugging.")
flag.IntVar(&TestContext.AllowedNotReadyNodes, "allowed-not-ready-nodes", 0, "If non-zero, framework will allow for that many non-ready nodes when checking for all ready nodes.")
flag.StringVar(&TestContext.Host, "host", "http://127.0.0.1:8080", "The host, or apiserver, to connect to")
flag.StringVar(&TestContext.ReportPrefix, "report-prefix", "", "Optional prefix for JUnit XML reports. Default is empty, which doesn't prepend anything to the default name.")
flag.StringVar(&TestContext.ReportDir, "report-dir", "", "Path to the directory where the JUnit XML reports should be saved. Default is empty, which doesn't generate these reports.")
flag.StringVar(&TestContext.FeatureGates, "feature-gates", "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
flag.StringVar(&TestContext.Viper, "viper-config", "e2e", "The name of the viper config i.e. 'e2e' will read values from 'e2e.json' locally. All e2e parameters are meant to be configurable by viper.")
}
// Register flags specific to the cluster e2e test suite.
func RegisterClusterFlags() {
flag.BoolVar(&TestContext.VerifyServiceAccount, "e2e-verify-service-account", true, "If true tests will verify the service account before running.")
flag.StringVar(&TestContext.KubeConfig, clientcmd.RecommendedConfigPathFlag, os.Getenv(clientcmd.RecommendedConfigPathEnvVar), "Path to kubeconfig containing embedded authinfo.")
flag.StringVar(&TestContext.KubeContext, clientcmd.FlagContext, "", "kubeconfig context to use/override. If unset, will use value from 'current-context'")
flag.StringVar(&TestContext.KubeAPIContentType, "kube-api-content-type", "application/vnd.kubernetes.protobuf", "ContentType used to communicate with apiserver")
flag.StringVar(&federatedKubeContext, "federated-kube-context", "e2e-federation", "kubeconfig context for federation.")
flag.StringVar(&TestContext.KubeVolumeDir, "volume-dir", "/var/lib/kubelet", "Path to the directory containing the kubelet volumes.")
flag.StringVar(&TestContext.CertDir, "cert-dir", "", "Path to the directory containing the certs. Default is empty, which doesn't use certs.")
flag.StringVar(&TestContext.RepoRoot, "repo-root", "../../", "Root directory of kubernetes repository, for finding test files.")
flag.StringVar(&TestContext.Provider, "provider", "", "The name of the Kubernetes provider (gce, gke, local, vagrant, etc.)")
flag.StringVar(&TestContext.KubectlPath, "kubectl-path", "kubectl", "The kubectl binary to use. For development, you might use 'cluster/kubectl.sh' here.")
flag.StringVar(&TestContext.OutputDir, "e2e-output-dir", "/tmp", "Output directory for interesting/useful test data, like performance data, benchmarks, and other metrics.")
flag.StringVar(&TestContext.Prefix, "prefix", "e2e", "A prefix to be added to cloud resources created during testing.")
flag.StringVar(&TestContext.ContainerRuntime, "container-runtime", "docker", "The container runtime of cluster VM instances (docker or rkt).")
flag.StringVar(&TestContext.MasterOSDistro, "master-os-distro", "debian", "The OS distribution of cluster master (debian, trusty, or coreos).")
flag.StringVar(&TestContext.NodeOSDistro, "node-os-distro", "debian", "The OS distribution of cluster VM instances (debian, trusty, or coreos).")
// TODO: Flags per provider? Rename gce-project/gce-zone?
cloudConfig := &TestContext.CloudConfig
flag.StringVar(&cloudConfig.MasterName, "kube-master", "", "Name of the kubernetes master. Only required if provider is gce or gke")
flag.StringVar(&cloudConfig.ProjectID, "gce-project", "", "The GCE project being used, if applicable")
flag.StringVar(&cloudConfig.Zone, "gce-zone", "", "GCE zone being used, if applicable")
flag.StringVar(&cloudConfig.Cluster, "gke-cluster", "", "GKE name of cluster being used, if applicable")
flag.StringVar(&cloudConfig.NodeInstanceGroup, "node-instance-group", "", "Name of the managed instance group for nodes. Valid only for gce, gke or aws. If there is more than one group: comma separated list of groups.")
flag.StringVar(&cloudConfig.Network, "network", "e2e", "The cloud provider network for this e2e cluster.")
flag.IntVar(&cloudConfig.NumNodes, "num-nodes", -1, "Number of nodes in the cluster")
flag.StringVar(&cloudConfig.ClusterTag, "cluster-tag", "", "Tag used to identify resources. Only required if provider is aws.")
flag.IntVar(&TestContext.MinStartupPods, "minStartupPods", 0, "The number of pods which we need to see in 'Running' state with a 'Ready' condition of true, before we try running tests. This is useful in any cluster which needs some base pod-based services running before it can be used.")
flag.DurationVar(&TestContext.SystemPodsStartupTimeout, "system-pods-startup-timeout", 10*time.Minute, "Timeout for waiting for all system pods to be running before starting tests.")
flag.DurationVar(&TestContext.NodeSchedulableTimeout, "node-schedulable-timeout", 4*time.Hour, "Timeout for waiting for all nodes to be schedulable.")
flag.StringVar(&TestContext.UpgradeTarget, "upgrade-target", "ci/latest", "Version to upgrade to (e.g. 'release/stable', 'release/latest', 'ci/latest', '0.19.1', '0.19.1-669-gabac8c8') if doing an upgrade test.")
flag.StringVar(&TestContext.UpgradeImage, "upgrade-image", "", "Image to upgrade to (e.g. 'container_vm' or 'gci') if doing an upgrade test.")
flag.StringVar(&TestContext.PrometheusPushGateway, "prom-push-gateway", "", "The URL to prometheus gateway, so that metrics can be pushed during e2es and scraped by prometheus. Typically something like 127.0.0.1:9091.")
flag.BoolVar(&TestContext.CleanStart, "clean-start", false, "If true, purge all namespaces except default and system before running tests. This serves to Cleanup test namespaces from failed/interrupted e2e runs in a long-lived cluster.")
flag.BoolVar(&TestContext.GarbageCollectorEnabled, "garbage-collector-enabled", true, "Set to true if the garbage collector is enabled in the kube-apiserver and kube-controller-manager, then some tests will rely on the garbage collector to delete dependent resources.")
}
// Register flags specific to the node e2e test suite.
func RegisterNodeFlags() {
// Mark the test as node e2e when node flags are registered.
TestContext.NodeE2E = true
flag.StringVar(&TestContext.NodeName, "node-name", "", "Name of the node to run tests on.")
// TODO(random-liu): Move kubelet start logic out of the test.
// TODO(random-liu): Move log fetch logic out of the test.
// There are different ways to start kubelet (systemd, initd, docker, rkt, manually started etc.)
// and manage logs (journald, upstart etc.).
// For different situations we need to mount different things into the container and run different commands.
// It is hard and unnecessary to deal with the complexity inside the test suite.
flag.BoolVar(&TestContext.NodeConformance, "conformance", false, "If true, the test suite will not start kubelet, and will fetch system logs (kernel, docker, kubelet log etc.) into the report directory.")
flag.BoolVar(&TestContext.PrepullImages, "prepull-images", true, "If true, prepull images so image pull failures do not cause test failures.")
}
// overwriteFlagsWithViperConfig finds and writes values to flags using viper as input.
func overwriteFlagsWithViperConfig() {
viperFlagSetter := func(f *flag.Flag) {
if viper.IsSet(f.Name) {
f.Value.Set(viper.GetString(f.Name))
}
}
flag.VisitAll(viperFlagSetter)
}
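// For example, given --viper-config=e2e and an e2e.json in the working
// directory containing {"report-dir": "/tmp/artifacts"} (a made-up value),
// viper.IsSet("report-dir") is true and the flag's value is overwritten; a
// key must match the flag name exactly for this to apply.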
// ViperizeFlags sets up all flag and config processing. Future configuration info should be added to viper, not to flags.
func ViperizeFlags() {
// Part 1: Set regular flags.
// TODO: Future, lets eliminate e2e 'flag' deps entirely in favor of viper only,
// since go test 'flag's are sort of incompatible w/ flag, glog, etc.
RegisterCommonFlags()
RegisterClusterFlags()
flag.Parse()
// Part 2: Set Viper provided flags.
// This must be done after common flags are registered, since Viper is a flag option.
viper.SetConfigName(TestContext.Viper)
viper.AddConfigPath(".")
viper.ReadInConfig()
// TODO: Consider whether or not we want to use overwriteFlagsWithViperConfig().
viper.Unmarshal(&TestContext)
}

5302
vendor/k8s.io/kubernetes/test/e2e/framework/util.go generated vendored Normal file

File diff suppressed because it is too large

444
vendor/k8s.io/kubernetes/test/e2e/garbage_collector.go generated vendored Normal file
View file

@ -0,0 +1,444 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
v1beta1 "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
func getOrphanOptions() *v1.DeleteOptions {
var trueVar = true
return &v1.DeleteOptions{OrphanDependents: &trueVar}
}
func getNonOrphanOptions() *v1.DeleteOptions {
var falseVar = false
return &v1.DeleteOptions{OrphanDependents: &falseVar}
}
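// With OrphanDependents=true the server deletes only the owner and leaves the
// dependents (and their ownerReferences) behind; with OrphanDependents=false
// the garbage collector cascades the deletion to the dependents.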
var zero = int64(0)
var deploymentLabels = map[string]string{"app": "gc-test"}
var podTemplateSpec = v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: deploymentLabels,
},
Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx:1.7.9",
},
},
},
}
func newOwnerDeployment(f *framework.Framework, deploymentName string) *v1beta1.Deployment {
replicas := int32(2)
return &v1beta1.Deployment{
ObjectMeta: v1.ObjectMeta{
Name: deploymentName,
},
Spec: v1beta1.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: deploymentLabels},
Strategy: v1beta1.DeploymentStrategy{
Type: v1beta1.RollingUpdateDeploymentStrategyType,
},
Template: podTemplateSpec,
},
}
}
func newOwnerRC(f *framework.Framework, name string) *v1.ReplicationController {
replicas := int32(2)
return &v1.ReplicationController{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicationController",
APIVersion: "v1",
},
ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
Spec: v1.ReplicationControllerSpec{
Replicas: &replicas,
Selector: map[string]string{"app": "gc-test"},
Template: &podTemplateSpec,
},
}
}
// verifyRemainingDeploymentsAndReplicaSets verifies whether the numbers of the remaining deployments
// and ReplicaSets equal deploymentNum and rsNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingDeploymentsAndReplicaSets(
f *framework.Framework,
clientSet clientset.Interface,
deployment *v1beta1.Deployment,
deploymentNum, rsNum int,
) (bool, error) {
var ret = true
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
if len(rs.Items) != rsNum {
ret = false
By(fmt.Sprintf("expected %d rs, got %d rs", rsNum, len(rs.Items)))
}
deployments, err := clientSet.Extensions().Deployments(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list deployments: %v", err)
}
if len(deployments.Items) != deploymentNum {
ret = false
By(fmt.Sprintf("expected %d Deploymentss, got %d Deployments", deploymentNum, len(deployments.Items)))
}
return ret, nil
}
// verifyRemainingObjects verifies whether the numbers of the remaining replication
// controllers and pods equal rcNum and podNum. It returns an error if the
// communication with the API server fails.
func verifyRemainingObjects(f *framework.Framework, clientSet clientset.Interface, rcNum, podNum int) (bool, error) {
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
pods, err := clientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
var ret = true
if len(pods.Items) != podNum {
ret = false
By(fmt.Sprintf("expected %d pods, got %d pods", podNum, len(pods.Items)))
}
rcs, err := rcClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list replication controllers: %v", err)
}
if len(rcs.Items) != rcNum {
ret = false
By(fmt.Sprintf("expected %d RCs, got %d RCs", rcNum, len(rcs.Items)))
}
return ret, nil
}
func gatherMetrics(f *framework.Framework) {
By("Gathering metrics")
var summary framework.TestDataSummary
grabber, err := metrics.NewMetricsGrabber(f.ClientSet, false, false, true, false)
if err != nil {
framework.Logf("Failed to create MetricsGrabber. Skipping metrics gathering.")
} else {
received, err := grabber.Grab()
if err != nil {
framework.Logf("MetricsGrabber failed grab metrics. Skipping metrics gathering.")
} else {
summary = (*framework.MetricsForE2E)(&received)
framework.Logf("%s", summary.PrintHumanReadable())
}
}
}
var _ = framework.KubeDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
It("should delete pods created by rc when not orphaning", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
rc := newOwnerRC(f, rcName)
By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
// We intentionally don't wait for the number of pods to reach
// rc.Spec.Replicas. We want to see if the garbage collector and the
// rc manager work properly if the rc is deleted before it reaches
// stasis.
return len(pods.Items) > 0, nil
}); err != nil {
framework.Failf("failed to wait for the rc to create some pods: %v", err)
}
By("delete the rc")
deleteOptions := getNonOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
By("wait for all pods to be garbage collected")
// wait for the RCs and Pods to reach the expected numbers.
if err := wait.Poll(5*time.Second, 60*time.Second, func() (bool, error) {
return verifyRemainingObjects(f, clientSet, 0, 0)
}); err != nil {
framework.Failf("failed to wait for all pods to be deleted: %v", err)
remainingPods, err := podClient.List(v1.ListOptions{})
if err != nil {
framework.Failf("failed to list pods post mortem: %v", err)
} else {
framework.Failf("remaining pods are: %#v", remainingPods)
}
}
gatherMetrics(f)
})
It("should orphan pods created by rc if delete options say so", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
rc := newOwnerRC(f, rcName)
replicas := int32(100)
rc.Spec.Replicas = &replicas
By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
}
return rc.Status.Replicas == *rc.Spec.Replicas, nil
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
By("delete the rc")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
By("wait for the rc to be deleted")
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rcs, err := rcClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rcs: %v", err)
}
if len(rcs.Items) != 0 {
return false, nil
}
return true, nil
}); err != nil && err != wait.ErrWaitTimeout {
framework.Failf("%v", err)
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
return false, fmt.Errorf("expect %d pods, got %d pods", e, a)
}
return false, nil
}); err != nil && err != wait.ErrWaitTimeout {
framework.Failf("%v", err)
}
gatherMetrics(f)
})
It("should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
rc := newOwnerRC(f, rcName)
By("create the rc")
rc, err := rcClient.Create(rc)
if err != nil {
framework.Failf("Failed to create replication controller: %v", err)
}
// wait for rc to create some pods
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
rc, err := rcClient.Get(rc.Name, metav1.GetOptions{})
if err != nil {
return false, fmt.Errorf("Failed to get rc: %v", err)
}
return rc.Status.Replicas == *rc.Spec.Replicas, nil
}); err != nil {
framework.Failf("failed to wait for the rc.Status.Replicas to reach rc.Spec.Replicas: %v", err)
}
By("delete the rc")
deleteOptions := &v1.DeleteOptions{}
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(rc.UID))
if err := rcClient.Delete(rc.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the rc: %v", err)
}
By("wait for 30 seconds to see if the garbage collector mistakenly deletes the pods")
if err := wait.Poll(5*time.Second, 30*time.Second, func() (bool, error) {
pods, err := podClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list pods: %v", err)
}
if e, a := int(*(rc.Spec.Replicas)), len(pods.Items); e != a {
return false, fmt.Errorf("expect %d pods, got %d pods", e, a)
}
return false, nil
}); err != nil && err != wait.ErrWaitTimeout {
framework.Failf("%v", err)
}
gatherMetrics(f)
})
It("should delete RS created by deployment when not orphaning", func() {
clientSet := f.ClientSet
deployClient := clientSet.Extensions().Deployments(f.Namespace.Name)
rsClient := clientSet.Extensions().ReplicaSets(f.Namespace.Name)
deploymentName := "simpletest.deployment"
deployment := newOwnerDeployment(f, deploymentName)
By("create the deployment")
createdDeployment, err := deployClient.Create(deployment)
if err != nil {
framework.Failf("Failed to create deployment: %v", err)
}
// wait for deployment to create some rs
By("Wait for the Deployment to create new ReplicaSet")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
rsList, err := rsClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
return len(rsList.Items) > 0, nil
})
if err != nil {
framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
}
By("delete the deployment")
deleteOptions := getNonOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
}
By("wait for all rs to be garbage collected")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsAndReplicaSets(f, clientSet, deployment, 0, 0)
})
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("Failed to wait for all rs to be garbage collected: %v", err)
remainingRSs, err := rsClient.List(v1.ListOptions{})
if err != nil {
framework.Failf("failed to list RSs post mortem: %v", err)
} else {
framework.Failf("remaining rs are: %#v", remainingRSs)
}
}
gatherMetrics(f)
})
It("should orphan RS created by deployment when deleteOptions.OrphanDependents is true", func() {
clientSet := f.ClientSet
deployClient := clientSet.Extensions().Deployments(f.Namespace.Name)
rsClient := clientSet.Extensions().ReplicaSets(f.Namespace.Name)
deploymentName := "simpletest.deployment"
deployment := newOwnerDeployment(f, deploymentName)
By("create the deployment")
createdDeployment, err := deployClient.Create(deployment)
if err != nil {
framework.Failf("Failed to create deployment: %v", err)
}
// wait for deployment to create some rs
By("Wait for the Deployment to create new ReplicaSet")
err = wait.PollImmediate(500*time.Millisecond, 1*time.Minute, func() (bool, error) {
rsList, err := rsClient.List(v1.ListOptions{})
if err != nil {
return false, fmt.Errorf("Failed to list rs: %v", err)
}
return len(rsList.Items) > 0, nil
})
if err != nil {
framework.Failf("Failed to wait for the Deployment to create some ReplicaSet: %v", err)
}
By("delete the deployment")
deleteOptions := getOrphanOptions()
deleteOptions.Preconditions = v1.NewUIDPreconditions(string(createdDeployment.UID))
if err := deployClient.Delete(deployment.ObjectMeta.Name, deleteOptions); err != nil {
framework.Failf("failed to delete the deployment: %v", err)
}
By("wait for 2 Minute to see if the garbage collector mistakenly deletes the rs")
err = wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
return verifyRemainingDeploymentsAndReplicaSets(f, clientSet, deployment, 0, 1)
})
if err != nil {
err = fmt.Errorf("Failed to wait to see if the garbage collecter mistakenly deletes the rs: %v", err)
remainingRSs, err := rsClient.List(v1.ListOptions{})
if err != nil {
framework.Failf("failed to list RSs post mortem: %v", err)
} else {
framework.Failf("remaining rs post mortem: %#v", remainingRSs)
}
remainingDSs, err := deployClient.List(v1.ListOptions{})
if err != nil {
framework.Failf("failed to list Deployments post mortem: %v", err)
} else {
framework.Failf("remaining deployment's post mortem: %#v", remainingDSs)
}
}
rs, err := clientSet.Extensions().ReplicaSets(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
framework.Failf("Failed to list ReplicaSet %v", err)
}
for _, replicaSet := range rs.Items {
if controller.GetControllerOf(&replicaSet.ObjectMeta) != nil {
framework.Failf("Found ReplicaSet with non nil ownerRef %v", replicaSet)
}
}
gatherMetrics(f)
})
})

54
vendor/k8s.io/kubernetes/test/e2e/generated/BUILD generated vendored Normal file
View file

@ -0,0 +1,54 @@
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"gobindata_util.go",
"main.go",
":bindata",
],
deps = [
"//vendor:github.com/golang/glog",
],
)
genrule(
name = "bindata",
srcs = [
"//examples:sources",
"//test/images:sources",
"//test/fixtures:sources",
"//test/e2e/testing-manifests:sources",
],
outs = ["bindata.go"],
cmd = """
$(location //vendor:github.com/jteeuwen/go-bindata/go-bindata_bin) \
-nometadata -o "$(OUTS)" -pkg generated \
-prefix $$(pwd) \
-ignore .jpg -ignore .png -ignore .md \
$(SRCS)
""",
tools = [
"//vendor:github.com/jteeuwen/go-bindata/go-bindata_bin",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View file

@ -0,0 +1,36 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generated
//go:generate ../../../hack/generate-bindata.sh
import "github.com/golang/glog"
/*
ReadOrDie reads a file from gobindata.
Relies heavily on the successful generation of build artifacts as per the go:generate directives above.
*/
func ReadOrDie(filePath string) []byte {
fileBytes, err := Asset(filePath)
if err != nil {
gobindataMsg := "An error occured, possibly gobindata doesn't know about the file you're opening. For questions on maintaining gobindata, contact the sig-testing group."
glog.Infof("Available gobindata files: %v ", AssetNames())
glog.Fatalf("Failed opening %v , with error %v. %v.", filePath, err, gobindataMsg)
}
return fileBytes
}
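// Example call, assuming the asset was compiled in by go-bindata; the path
// below is hypothetical and must match a generated asset name exactly:
//
// podYaml := ReadOrDie("test/fixtures/doc-yaml/user-guide/pod.yaml")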

19
vendor/k8s.io/kubernetes/test/e2e/generated/main.go generated vendored Normal file
View file

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package generated
// No code is needed here. This is a stub for compilation purposes.

View file

@ -0,0 +1,351 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"strconv"
"time"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
clientv1 "k8s.io/client-go/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
func stagingClientPod(name, value string) clientv1.Pod {
return clientv1.Pod{
ObjectMeta: clientv1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: clientv1.PodSpec{
Containers: []clientv1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
Ports: []clientv1.ContainerPort{{ContainerPort: 80}},
},
},
},
}
}
func testingPod(name, value string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &v1.Probe{
Handler: v1.Handler{
HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.FromInt(8080),
},
},
InitialDelaySeconds: 30,
},
},
},
},
}
}
func observeCreation(w watch.Interface) {
select {
case event, _ := <-w.ResultChan():
if event.Type != watch.Added {
framework.Failf("Failed to observe the creation: %v", event)
}
case <-time.After(30 * time.Second):
framework.Failf("Timeout while waiting for observing the creation")
}
}
func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
deleted := false
timeout := false
timer := time.After(60 * time.Second)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
obj = event.Object
deleted = true
}
case <-timer:
timeout = true
}
}
if !deleted {
framework.Failf("Failed to observe pod deletion")
}
return
}
var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, delete pods, watch pods", func() {
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
podCopy := testingPod(name, value)
pod := &podCopy
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := v1.ListOptions{LabelSelector: selector}
pods, err := podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
options = v1.ListOptions{
LabelSelector: selector,
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
}
By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
}
By("verifying the pod is in kubernetes")
options = v1.ListOptions{
LabelSelector: selector,
ResourceVersion: pod.ResourceVersion,
}
pods, err = podClient.List(options)
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(1))
By("verifying pod creation was observed")
observeCreation(w)
// We need to wait for the pod to be scheduled, otherwise the deletion
// will be carried out immediately rather than gracefully.
framework.ExpectNoError(f.WaitForPodRunning(pod.Name))
By("deleting the pod gracefully")
if err := podClient.Delete(pod.Name, v1.NewDeleteOptions(30)); err != nil {
framework.Failf("Failed to delete pod: %v", err)
}
By("verifying pod deletion was observed")
obj := observeObjectDeletion(w)
lastPod := obj.(*v1.Pod)
Expect(lastPod.DeletionTimestamp).ToNot(BeNil())
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
options = v1.ListOptions{LabelSelector: selector}
pods, err = podClient.List(options)
if err != nil {
framework.Failf("Failed to list pods to verify deletion: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
})
})
func newTestingCronJob(name string, value string) *v2alpha1.CronJob {
parallelism := int32(1)
completions := int32(1)
return &v2alpha1.CronJob{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"time": value,
},
},
Spec: v2alpha1.CronJobSpec{
Schedule: "*/1 * * * ?",
ConcurrencyPolicy: v2alpha1.AllowConcurrent,
JobTemplate: v2alpha1.JobTemplateSpec{
Spec: v2alpha1.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
},
},
},
},
},
},
},
},
},
}
}
var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs", func() {
var enabled bool
groupList, err := f.ClientSet.Discovery().ServerGroups()
framework.ExpectNoError(err)
for _, group := range groupList.Groups {
if group.Name == v2alpha1.GroupName {
for _, version := range group.Versions {
if version.Version == v2alpha1.SchemeGroupVersion.Version {
enabled = true
break
}
}
}
}
if !enabled {
framework.Logf("%s is not enabled, test skipped", v2alpha1.SchemeGroupVersion)
return
}
cronJobClient := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name)
By("constructing the cronJob")
name := "cronjob" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
cronJob := newTestingCronJob(name, value)
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})).String()
options := v1.ListOptions{LabelSelector: selector}
cronJobs, err := cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(0))
options = v1.ListOptions{
LabelSelector: selector,
ResourceVersion: cronJobs.ListMeta.ResourceVersion,
}
w, err := cronJobClient.Watch(options)
if err != nil {
framework.Failf("Failed to set up watch: %v", err)
}
By("creating the cronJob")
cronJob, err = cronJobClient.Create(cronJob)
if err != nil {
framework.Failf("Failed to create cronJob: %v", err)
}
By("verifying the cronJob is in kubernetes")
options = v1.ListOptions{
LabelSelector: selector,
ResourceVersion: cronJob.ResourceVersion,
}
cronJobs, err = cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to query for cronJobs: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(1))
By("verifying cronJob creation was observed")
observeCreation(w)
By("deleting the cronJob")
if err := cronJobClient.Delete(cronJob.Name, nil); err != nil {
framework.Failf("Failed to delete cronJob: %v", err)
}
options = v1.ListOptions{LabelSelector: selector}
cronJobs, err = cronJobClient.List(options)
if err != nil {
framework.Failf("Failed to list cronJobs to verify deletion: %v", err)
}
Expect(len(cronJobs.Items)).To(Equal(0))
})
})
var _ = framework.KubeDescribe("Staging client repo client", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, delete pods, watch pods", func() {
podClient := f.StagingClient.Core().Pods(f.Namespace.Name)
By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
podCopy := stagingClientPod(name, value)
pod := &podCopy
By("verifying no pod exists before the test")
pods, err := podClient.List(clientv1.ListOptions{})
if err != nil {
framework.Failf("Failed to query for pods: %v", err)
}
Expect(len(pods.Items)).To(Equal(0))
By("creating the pod")
pod, err = podClient.Create(pod)
if err != nil {
framework.Failf("Failed to create pod: %v", err)
}
By("verifying the pod is in kubernetes")
timeout := 1 * time.Minute
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = podClient.List(clientv1.ListOptions{})
if err != nil {
return false, err
}
if len(pods.Items) == 1 {
return true, nil
}
return false, nil
}); err != nil {
framework.Failf("Err : %s\n. Failed to wait for 1 pod to be created", err)
}
})
})

111
vendor/k8s.io/kubernetes/test/e2e/gke_local_ssd.go generated vendored Normal file
View file

@ -0,0 +1,111 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os/exec"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("GKE local SSD [Feature:GKELocalSSD]", func() {
f := framework.NewDefaultFramework("localssd")
BeforeEach(func() {
framework.SkipUnlessProviderIs("gke")
})
It("should write and read from node local SSD [Feature:GKELocalSSD]", func() {
framework.Logf("Start local SSD test")
createNodePoolWithLocalSsds("np-ssd")
doTestWriteAndReadToLocalSsd(f)
})
})
func createNodePoolWithLocalSsds(nodePoolName string) {
framework.Logf("Create node pool: %s with local SSDs in cluster: %s ",
nodePoolName, framework.TestContext.CloudConfig.Cluster)
out, err := exec.Command("gcloud", "alpha", "container", "node-pools", "create",
nodePoolName,
fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster),
"--local-ssd-count=1").CombinedOutput()
if err != nil {
framework.Failf("Failed to create node pool %s: Err: %v\n%v", nodePoolName, err, string(out))
}
framework.Logf("Successfully created node pool %s:\n%v", nodePoolName, string(out))
}
func doTestWriteAndReadToLocalSsd(f *framework.Framework) {
var pod = testPodWithSsd("echo 'hello world' > /mnt/disks/ssd0/data && sleep 1 && cat /mnt/disks/ssd0/data")
var msg string
var out = []string{"hello world"}
f.TestContainerOutput(msg, pod, 0, out)
}
func testPodWithSsd(command string) *v1.Pod {
containerName := "test-container"
volumeName := "test-ssd-volume"
path := "/mnt/disks/ssd0"
podName := "pod-" + string(uuid.NewUUID())
image := "ubuntu:14.04"
return &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: api.Registry.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
ObjectMeta: v1.ObjectMeta{
Name: podName,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: containerName,
Image: image,
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
},
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: path,
},
},
},
},
NodeSelector: map[string]string{"cloud.google.com/gke-local-ssd": "true"},
},
}
}

108
vendor/k8s.io/kubernetes/test/e2e/gke_node_pools.go generated vendored Normal file
View file

@ -0,0 +1,108 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os/exec"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
var _ = framework.KubeDescribe("GKE node pools [Feature:GKENodePool]", func() {
f := framework.NewDefaultFramework("node-pools")
BeforeEach(func() {
framework.SkipUnlessProviderIs("gke")
})
It("should create a cluster with multiple node pools [Feature:GKENodePool]", func() {
framework.Logf("Start create node pool test")
testCreateDeleteNodePool(f, "test-pool")
})
})
func testCreateDeleteNodePool(f *framework.Framework, poolName string) {
framework.Logf("Create node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
clusterStr := fmt.Sprintf("--cluster=%s", framework.TestContext.CloudConfig.Cluster)
out, err := exec.Command("gcloud", "container", "node-pools", "create",
poolName,
clusterStr,
"--num-nodes=2").CombinedOutput()
framework.Logf("\n%s", string(out))
if err != nil {
framework.Failf("Failed to create node pool %q. Err: %v\n%v", poolName, err, string(out))
}
framework.Logf("Successfully created node pool %q.", poolName)
out, err = exec.Command("gcloud", "container", "node-pools", "list",
clusterStr).CombinedOutput()
if err != nil {
framework.Failf("Failed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
}
framework.Logf("Node pools:\n%s", string(out))
framework.Logf("Checking that 2 nodes have the correct node pool label.")
nodeCount := nodesWithPoolLabel(f, poolName)
if nodeCount != 2 {
framework.Failf("Wanted 2 nodes with node pool label, got: %v", nodeCount)
}
framework.Logf("Success, found 2 nodes with correct node pool labels.")
framework.Logf("Deleting node pool: %q in cluster: %q", poolName, framework.TestContext.CloudConfig.Cluster)
out, err = exec.Command("gcloud", "container", "node-pools", "delete",
poolName,
clusterStr,
"-q").CombinedOutput()
framework.Logf("\n%s", string(out))
if err != nil {
framework.Failf("Failed to delete node pool %q. Err: %v\n%v", poolName, err, string(out))
}
framework.Logf("Successfully deleted node pool %q.", poolName)
out, err = exec.Command("gcloud", "container", "node-pools", "list",
clusterStr).CombinedOutput()
if err != nil {
framework.Failf("\nFailed to list node pools from cluster %q. Err: %v\n%v", framework.TestContext.CloudConfig.Cluster, err, string(out))
}
framework.Logf("\nNode pools:\n%s", string(out))
framework.Logf("Checking that no nodes have the deleted node pool's label.")
nodeCount = nodesWithPoolLabel(f, poolName)
if nodeCount != 0 {
framework.Failf("Wanted 0 nodes with node pool label, got: %v", nodeCount)
}
framework.Logf("Success, found no nodes with the deleted node pool's label.")
}
// nodesWithPoolLabel returns the number of nodes that have the "gke-nodepool"
// label with the given node pool name.
func nodesWithPoolLabel(f *framework.Framework, poolName string) int {
nodeCount := 0
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
for _, node := range nodeList.Items {
if poolLabel := node.Labels["cloud.google.com/gke-nodepool"]; poolLabel == poolName {
nodeCount++
}
}
return nodeCount
}

244
vendor/k8s.io/kubernetes/test/e2e/ha_master.go generated vendored Normal file
View file

@ -0,0 +1,244 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"os/exec"
"path"
"strconv"
"strings"
"time"
. "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/test/e2e/framework"
)
func addMasterReplica(zone string) error {
framework.Logf(fmt.Sprintf("Adding a new master replica, zone: %s", zone))
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "true", "false")
framework.Logf("%s", v)
if err != nil {
return err
}
return nil
}
func removeMasterReplica(zone string) error {
framework.Logf(fmt.Sprintf("Removing an existing master replica, zone: %s", zone))
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "false", "false")
framework.Logf("%s", v)
if err != nil {
return err
}
return nil
}
func addWorkerNodes(zone string) error {
framework.Logf(fmt.Sprintf("Adding worker nodes, zone: %s", zone))
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-grow-cluster.sh"), zone, "true", "false", "true")
framework.Logf("%s", v)
if err != nil {
return err
}
return nil
}
func removeWorkerNodes(zone string) error {
framework.Logf(fmt.Sprintf("Removing worker nodes, zone: %s", zone))
v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/e2e-internal/e2e-shrink-cluster.sh"), zone, "true", "true", "true")
framework.Logf("%s", v)
if err != nil {
return err
}
return nil
}
func verifyRCs(c clientset.Interface, ns string, names []string) {
for _, name := range names {
framework.ExpectNoError(framework.VerifyPods(c, ns, name, true, 1))
}
}
func createNewRC(c clientset.Interface, ns string, name string) {
_, err := newRCByName(c, ns, name, 1, nil)
framework.ExpectNoError(err)
}
func findRegionForZone(zone string) string {
region, err := exec.Command("gcloud", "compute", "zones", "list", zone, "--quiet", "--format=[no-heading](region)").CombinedOutput()
framework.ExpectNoError(err)
if string(region) == "" {
framework.Failf("Region not found; zone: %s", zone)
}
return string(region)
}
func findZonesForRegion(region string) []string {
output, err := exec.Command("gcloud", "compute", "zones", "list", "--filter=region="+region,
"--quiet", "--format=[no-heading](name)").CombinedOutput()
framework.ExpectNoError(err)
zones := strings.Split(string(output), "\n")
return zones
}
// removeZoneFromZones removes zone from the zones slice.
// Please note that entries in zones can be repeated. In such a situation only one entry is removed.
func removeZoneFromZones(zones []string, zone string) []string {
idx := -1
for j, z := range zones {
if z == zone {
idx = j
break
}
}
if idx >= 0 {
return zones[:idx+copy(zones[idx:], zones[idx+1:])]
}
return zones
}
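// For example, removeZoneFromZones([]string{"us-central1-a", "us-central1-b",
// "us-central1-a"}, "us-central1-a") returns ["us-central1-b", "us-central1-a"]:
// only the first occurrence is removed.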
var _ = framework.KubeDescribe("HA-master [Feature:HAMaster]", func() {
f := framework.NewDefaultFramework("ha-master")
var c clientset.Interface
var ns string
var additionalReplicaZones []string
var additionalNodesZones []string
var existingRCs []string
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce")
c = f.ClientSet
ns = f.Namespace.Name
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
additionalReplicaZones = make([]string, 0)
additionalNodesZones = make([]string, 0)
existingRCs = make([]string, 0)
})
AfterEach(func() {
// Clean up additional worker nodes if the test execution was broken.
for _, zone := range additionalNodesZones {
removeWorkerNodes(zone)
}
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
// Clean up additional master replicas if the test execution was broken.
for _, zone := range additionalReplicaZones {
removeMasterReplica(zone)
}
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, 1, 10*time.Minute))
})
type Action int
const (
None Action = iota
AddReplica
RemoveReplica
AddNodes
RemoveNodes
)
step := func(action Action, zone string) {
switch action {
case None:
case AddReplica:
framework.ExpectNoError(addMasterReplica(zone))
additionalReplicaZones = append(additionalReplicaZones, zone)
case RemoveReplica:
framework.ExpectNoError(removeMasterReplica(zone))
additionalReplicaZones = removeZoneFromZones(additionalReplicaZones, zone)
case AddNodes:
framework.ExpectNoError(addWorkerNodes(zone))
additionalNodesZones = append(additionalNodesZones, zone)
case RemoveNodes:
framework.ExpectNoError(removeWorkerNodes(zone))
additionalNodesZones = removeZoneFromZones(additionalNodesZones, zone)
}
framework.ExpectNoError(framework.WaitForMasters(framework.TestContext.CloudConfig.MasterName, c, len(additionalReplicaZones)+1, 10*time.Minute))
framework.ExpectNoError(framework.AllNodesReady(c, 5*time.Minute))
// Verify that API server works correctly with HA master.
rcName := "ha-master-" + strconv.Itoa(len(existingRCs))
createNewRC(c, ns, rcName)
existingRCs = append(existingRCs, rcName)
verifyRCs(c, ns, existingRCs)
}
It("survive addition/removal replicas same zone [Serial][Disruptive]", func() {
zone := framework.TestContext.CloudConfig.Zone
step(None, "")
numAdditionalReplicas := 2
for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zone)
}
for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zone)
}
})
It("survive addition/removal replicas different zones [Serial][Disruptive]", func() {
zone := framework.TestContext.CloudConfig.Zone
region := findRegionForZone(zone)
zones := findZonesForRegion(region)
zones = removeZoneFromZones(zones, zone)
step(None, "")
// If numAdditionalReplicas is larger than the number of remaining zones in the region,
// we create a few masters in the same zone and the zone entry is repeated in additionalReplicaZones.
numAdditionalReplicas := 2
for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zones[i%len(zones)])
}
for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zones[i%len(zones)])
}
})
It("survive addition/removal replicas multizone workers [Serial][Disruptive]", func() {
zone := framework.TestContext.CloudConfig.Zone
region := findRegionForZone(zone)
zones := findZonesForRegion(region)
zones = removeZoneFromZones(zones, zone)
step(None, "")
numAdditionalReplicas := 2
// Add worker nodes.
for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
step(AddNodes, zones[i])
}
// Add master replicas.
//
// If numAdditionalReplicas is larger than the number of remaining zones in the region,
// we create a few masters in the same zone and the zone entry is repeated in additionalReplicaZones.
for i := 0; i < numAdditionalReplicas; i++ {
step(AddReplica, zones[i%len(zones)])
}
// Remove master replicas.
for i := 0; i < numAdditionalReplicas; i++ {
step(RemoveReplica, zones[i%len(zones)])
}
// Remove worker nodes.
for i := 0; i < numAdditionalReplicas && i < len(zones); i++ {
step(RemoveNodes, zones[i])
}
})
})

196
vendor/k8s.io/kubernetes/test/e2e/horizontal_pod_autoscaling.go generated vendored Normal file
View file

@ -0,0 +1,196 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
"k8s.io/kubernetes/pkg/api/v1"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
)
const (
kindRC = "replicationController"
kindDeployment = "deployment"
kindReplicaSet = "replicaset"
subresource = "scale"
)
// These tests don't seem to run properly in parallel; see issue #20338.
var _ = framework.KubeDescribe("[HPA] Horizontal pod autoscaling (scale resource: CPU)", func() {
var rc *ResourceConsumer
f := framework.NewDefaultFramework("horizontal-pod-autoscaling")
titleUp := "Should scale from 1 pod to 3 pods and from 3 to 5"
titleDown := "Should scale from 5 pods to 3 pods and from 3 to 1"
framework.KubeDescribe("[Serial] [Slow] Deployment", func() {
// CPU tests via deployments
It(titleUp, func() {
scaleUp("test-deployment", kindDeployment, false, rc, f)
})
It(titleDown, func() {
scaleDown("test-deployment", kindDeployment, false, rc, f)
})
})
framework.KubeDescribe("[Serial] [Slow] ReplicaSet", func() {
// CPU tests via replica sets
It(titleUp, func() {
scaleUp("rs", kindReplicaSet, false, rc, f)
})
It(titleDown, func() {
scaleDown("rs", kindReplicaSet, false, rc, f)
})
})
// These tests take ~20 minutes each.
framework.KubeDescribe("[Serial] [Slow] ReplicationController", func() {
// CPU tests via replication controllers
It(titleUp+" and verify decision stability", func() {
scaleUp("rc", kindRC, true, rc, f)
})
It(titleDown+" and verify decision stability", func() {
scaleDown("rc", kindRC, true, rc, f)
})
})
framework.KubeDescribe("ReplicationController light", func() {
It("Should scale from 1 pod to 2 pods", func() {
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 150,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 2,
}
scaleTest.run("rc-light", kindRC, rc, f)
})
It("Should scale from 2 pods to 1 pod", func() {
scaleTest := &HPAScaleTest{
initPods: 2,
totalInitialCPUUsage: 50,
perPodCPURequest: 200,
targetCPUUtilizationPercent: 50,
minPods: 1,
maxPods: 2,
firstScale: 1,
}
scaleTest.run("rc-light", kindRC, rc, f)
})
})
})
// HPAScaleTest struct is used by the scale(...) function.
type HPAScaleTest struct {
initPods int32
totalInitialCPUUsage int32
perPodCPURequest int64
targetCPUUtilizationPercent int32
minPods int32
maxPods int32
firstScale int32
firstScaleStasis time.Duration
cpuBurst int
secondScale int32
secondScaleStasis time.Duration
}
// run is a method which runs an HPA lifecycle, from a starting state to an expected final state.
// The initial state is defined by the initPods parameter.
// The first state change is due to the CPU being consumed initially, which HPA responds to by changing pod counts.
// The second state change (optional) is due to the CPU burst parameter, which HPA again responds to.
// TODO: The use of 3 states is arbitrary; we could eventually make this test handle "n" states once it stabilizes.
func (scaleTest *HPAScaleTest) run(name, kind string, rc *ResourceConsumer, f *framework.Framework) {
rc = NewDynamicResourceConsumer(name, kind, int(scaleTest.initPods), int(scaleTest.totalInitialCPUUsage), 0, 0, scaleTest.perPodCPURequest, 200, f)
defer rc.CleanUp()
createCPUHorizontalPodAutoscaler(rc, scaleTest.targetCPUUtilizationPercent, scaleTest.minPods, scaleTest.maxPods)
rc.WaitForReplicas(int(scaleTest.firstScale))
if scaleTest.firstScaleStasis > 0 {
rc.EnsureDesiredReplicas(int(scaleTest.firstScale), scaleTest.firstScaleStasis)
}
if scaleTest.cpuBurst > 0 && scaleTest.secondScale > 0 {
rc.ConsumeCPU(scaleTest.cpuBurst)
rc.WaitForReplicas(int(scaleTest.secondScale))
}
}
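// Lifecycle sketch for run (illustrative): initPods replicas consume
// totalInitialCPUUsage millicores -> HPA scales to firstScale -> optionally held
// at firstScale for firstScaleStasis -> ConsumeCPU(cpuBurst) -> HPA scales to
// secondScale. scaleUp and scaleDown below instantiate this with concrete values.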
func scaleUp(name, kind string, checkStability bool, rc *ResourceConsumer, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAScaleTest{
initPods: 1,
totalInitialCPUUsage: 250,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 20,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 700,
secondScale: 5,
}
scaleTest.run(name, kind, rc, f)
}
func scaleDown(name, kind string, checkStability bool, rc *ResourceConsumer, f *framework.Framework) {
stasis := 0 * time.Minute
if checkStability {
stasis = 10 * time.Minute
}
scaleTest := &HPAScaleTest{
initPods: 5,
totalInitialCPUUsage: 375,
perPodCPURequest: 500,
targetCPUUtilizationPercent: 30,
minPods: 1,
maxPods: 5,
firstScale: 3,
firstScaleStasis: stasis,
cpuBurst: 10,
secondScale: 1,
}
scaleTest.run(name, kind, rc, f)
}
func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) {
hpa := &autoscaling.HorizontalPodAutoscaler{
ObjectMeta: v1.ObjectMeta{
Name: rc.name,
Namespace: rc.framework.Namespace.Name,
},
Spec: autoscaling.HorizontalPodAutoscalerSpec{
ScaleTargetRef: autoscaling.CrossVersionObjectReference{
Kind: rc.kind,
Name: rc.name,
},
MinReplicas: &minReplicas,
MaxReplicas: maxRepl,
TargetCPUUtilizationPercentage: &cpu,
},
}
_, errHPA := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
framework.ExpectNoError(errHPA)
}
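// Usage sketch (mirrors the call in run above): the "ReplicationController light"
// specs effectively create their autoscaler with
//
//	createCPUHorizontalPodAutoscaler(rc, 50, 1, 2)
//
// i.e. a 50% CPU utilization target across 1 to 2 replicas.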

218
vendor/k8s.io/kubernetes/test/e2e/ingress.go generated vendored Normal file
View file

@ -0,0 +1,218 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"path/filepath"
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
rbacv1alpha1 "k8s.io/kubernetes/pkg/apis/rbac/v1alpha1"
"k8s.io/kubernetes/pkg/serviceaccount"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// parent path to yaml test manifests.
ingressManifestPath = "test/e2e/testing-manifests/ingress"
// timeout on a single http request.
reqTimeout = 10 * time.Second
// healthz port used to verify glbc restarted correctly on the master.
glbcHealthzPort = 8086
// General cloud resource poll timeout (e.g. create static IP, firewall, etc.)
cloudResourcePollTimeout = 5 * time.Minute
// Name of the config-map and key the ingress controller stores its uid in.
uidConfigMap = "ingress-uid"
uidKey = "uid"
// GCE only allows names < 64 characters, and the loadbalancer controller inserts
// a single character of padding.
nameLenLimit = 62
)
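// Arithmetic note (sketch): names must be shorter than 64 characters, i.e. at most
// 63; one character of controller padding leaves 63 - 1 = 62, hence nameLenLimit = 62.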
var _ = framework.KubeDescribe("Loadbalancing: L7", func() {
defer GinkgoRecover()
var (
ns string
jig *testJig
conformanceTests []conformanceTests
)
f := framework.NewDefaultFramework("ingress")
BeforeEach(func() {
f.BeforeEach()
jig = newTestJig(f.ClientSet)
ns = f.Namespace.Name
// This test needs powerful permissions. Since the namespace names are unique, we can
// leave the binding lying around so we don't have to race any caches.
framework.BindClusterRole(jig.client.Rbac(), "cluster-admin", f.Namespace.Name,
rbacv1alpha1.Subject{Kind: rbacv1alpha1.ServiceAccountKind, Namespace: f.Namespace.Name, Name: "default"})
err := framework.WaitForAuthorizationUpdate(jig.client.Authorization(),
serviceaccount.MakeUsername(f.Namespace.Name, "default"),
"", "create", schema.GroupResource{Resource: "pods"}, true)
framework.ExpectNoError(err)
})
// Before enabling this loadbalancer test in any other test list you must
// make sure the associated project has enough quota. At the time of this
// writing a GCE project is allowed 3 backend services by default. This
// test requires at least 5.
//
// Slow by design: ~10m for each "It" block, dominated by load balancer setup time.
// TODO: write similar tests for nginx, haproxy and AWS Ingress.
framework.KubeDescribe("GCE [Slow] [Feature:Ingress]", func() {
var gceController *GCEIngressController
// Platform specific setup
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing gce controller")
gceController = &GCEIngressController{
ns: ns,
c: jig.client,
cloud: framework.TestContext.CloudConfig,
}
gceController.init()
})
// Platform specific cleanup
AfterEach(func() {
if CurrentGinkgoTestDescription().Failed {
describeIng(ns)
}
if jig.ing == nil {
By("No ingress created, no cleanup necessary")
return
}
By("Deleting ingress")
jig.deleteIngress()
By("Cleaning up cloud resources")
cleanupGCE(gceController)
})
It("should conform to Ingress spec", func() {
conformanceTests = createComformanceTests(jig, ns)
for _, t := range conformanceTests {
By(t.entryLog)
t.execute()
By(t.exitLog)
jig.waitForIngress(true)
}
})
It("shoud create ingress with given static-ip", func() {
// ip released when the rest of lb resources are deleted in cleanupGCE
ip := gceController.createStaticIP(ns)
By(fmt.Sprintf("allocated static ip %v: %v through the GCE cloud provider", ns, ip))
jig.createIngress(filepath.Join(ingressManifestPath, "static-ip"), ns, map[string]string{
"kubernetes.io/ingress.global-static-ip-name": ns,
"kubernetes.io/ingress.allow-http": "false",
})
By("waiting for Ingress to come up with ip: " + ip)
httpClient := buildInsecureClient(reqTimeout)
framework.ExpectNoError(pollURL(fmt.Sprintf("https://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, false))
By("should reject HTTP traffic")
framework.ExpectNoError(pollURL(fmt.Sprintf("http://%v/", ip), "", framework.LoadBalancerPollTimeout, jig.pollInterval, httpClient, true))
By("should have correct firewall rule for ingress")
fw := gceController.getFirewallRule()
expFw := jig.constructFirewallForIngress(gceController)
// Passed the last argument as `true` to verify that the backend ports are a subset
// of the allowed ports in the firewall rule, given there may be other existing
// ingress resources and backends we are not aware of.
Expect(framework.VerifyFirewallRule(fw, expFw, gceController.cloud.Network, true)).NotTo(HaveOccurred())
// TODO: uncomment the restart test once we have a way to synchronize
// and know that the controller has resumed watching. If we delete
// the ingress before the controller is ready we will leak.
// By("restaring glbc")
// restarter := NewRestartConfig(
// framework.GetMasterHost(), "glbc", glbcHealthzPort, restartPollInterval, restartTimeout)
// restarter.restart()
// By("should continue serving on provided static-ip for 30 seconds")
// ExpectNoError(jig.verifyURL(fmt.Sprintf("https://%v/", ip), "", 30, 1*time.Second, httpClient))
})
// TODO: Implement a multizone e2e that verifies traffic reaches each
// zone based on pod labels.
})
// Time: borderline 5m, slow by design
framework.KubeDescribe("Nginx", func() {
var nginxController *NginxIngressController
BeforeEach(func() {
framework.SkipUnlessProviderIs("gce", "gke")
By("Initializing nginx controller")
jig.class = "nginx"
nginxController = &NginxIngressController{ns: ns, c: jig.client}
// TODO: This test may fail on other platforms. We could simply skip it,
// but we want to allow easy testing where a user might have
// hand-configured firewalls.
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(gcloudCreate("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID, "--allow", "tcp:80,tcp:443", "--network", framework.TestContext.CloudConfig.Network))
} else {
framework.Logf("WARNING: Not running on GCE/GKE, cannot create firewall rules for :80, :443. Assuming traffic can reach the external ips of all nodes in cluster on those ports.")
}
nginxController.init()
})
AfterEach(func() {
if framework.ProviderIs("gce", "gke") {
framework.ExpectNoError(gcloudDelete("firewall-rules", fmt.Sprintf("ingress-80-443-%v", ns), framework.TestContext.CloudConfig.ProjectID))
}
if CurrentGinkgoTestDescription().Failed {
describeIng(ns)
}
if jig.ing == nil {
By("No ingress created, no cleanup necessary")
return
}
By("Deleting ingress")
jig.deleteIngress()
})
It("should conform to Ingress spec", func() {
// Poll more frequently to reduce e2e completion time.
// This test runs in presubmit.
jig.pollInterval = 5 * time.Second
conformanceTests = createComformanceTests(jig, ns)
for _, t := range conformanceTests {
By(t.entryLog)
t.execute()
By(t.exitLog)
jig.waitForIngress(false)
}
})
})
})

1049
vendor/k8s.io/kubernetes/test/e2e/ingress_utils.go generated vendored Normal file

File diff suppressed because it is too large

72
vendor/k8s.io/kubernetes/test/e2e/initial_resources.go generated vendored Normal file
View file

@ -0,0 +1,72 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"fmt"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
// [Feature:InitialResources]: Initial resources is an experimental feature, so
// these tests are not run by default.
//
// Flaky issue #20272
var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Flaky]", func() {
f := framework.NewDefaultFramework("initial-resources")
It("should set initial resources based on historical data", func() {
// TODO(piosz): Add cleanup of data left in InfluxDB by previous tests.
cpu := 100
mem := 200
for i := 0; i < 10; i++ {
rc := NewStaticResourceConsumer(fmt.Sprintf("ir-%d", i), 1, cpu, mem, 0, int64(2*cpu), int64(2*mem), f)
defer rc.CleanUp()
}
// Wait some time to make sure usage data is gathered.
time.Sleep(10 * time.Minute)
pod := runPod(f, "ir-test", resourceConsumerImage)
r := pod.Spec.Containers[0].Resources.Requests
Expect(r.Cpu().MilliValue()).Should(BeNumerically("~", cpu, 10))
Expect(r.Memory().Value()).Should(BeNumerically("~", mem*1024*1024, 20*1024*1024))
})
})
func runPod(f *framework.Framework, name, image string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: name,
Image: image,
},
},
},
}
createdPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, createdPod))
return createdPod
}

345
vendor/k8s.io/kubernetes/test/e2e/job.go generated vendored Normal file
View file

@ -0,0 +1,345 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
batchinternal "k8s.io/kubernetes/pkg/apis/batch"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
// How long to wait for a job to finish.
jobTimeout = 15 * time.Minute
// Job selector name
jobSelectorKey = "job"
)
var _ = framework.KubeDescribe("Job", func() {
f := framework.NewDefaultFramework("job")
parallelism := int32(2)
completions := int32(4)
lotsOfFailures := int32(5) // more than completions
// Simplest case: all pods succeed promptly
It("should run a job to completion when tasks succeed", func() {
By("Creating a job")
job := newTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
// Pods sometimes fail, but eventually succeed.
It("should run a job to completion when tasks sometimes fail and are locally restarted", func() {
By("Creating a job")
// One failure, then a success, local restarts.
// We can't use the random failure approach used by the
// non-local test below, because kubelet will throttle
// frequently failing containers in a given pod, ramping
// up to 5 minutes between restarts, making test timeouts
// due to successive failures too likely with a reasonable
// test timeout.
job := newTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
// Pods sometimes fail, but eventually succeed, after pod restarts
It("should run a job to completion when tasks sometimes fail and are not locally restarted", func() {
By("Creating a job")
// 50% chance of container success, no local restarts.
// Can't use the failOnce approach because that relies
// on an emptyDir, which is not preserved across new pods.
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
job := newTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job reaches completions")
err = waitForJobFinish(f.ClientSet, f.Namespace.Name, job.Name, completions)
Expect(err).NotTo(HaveOccurred())
})
It("should keep restarting failed pods", func() {
By("Creating a job")
job := newTestJob("fail", "all-fail", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job shows many failures")
err = wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := getJob(f.ClientSet, f.Namespace.Name, job.Name)
if err != nil {
return false, err
}
return curr.Status.Failed > lotsOfFailures, nil
})
Expect(err).NotTo(HaveOccurred())
})
It("should scale a job up", func() {
startParallelism := int32(1)
endParallelism := int32(2)
By("Creating a job")
job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred())
By("scale job up")
scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred())
})
It("should scale a job down", func() {
startParallelism := int32(2)
endParallelism := int32(1)
By("Creating a job")
job := newTestJob("notTerminate", "scale-down", v1.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == startParallelism")
err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, startParallelism)
Expect(err).NotTo(HaveOccurred())
By("scale job down")
scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
err = scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == endParallelism")
err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, endParallelism)
Expect(err).NotTo(HaveOccurred())
})
It("should delete a job", func() {
By("Creating a job")
job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring active pods == parallelism")
err = waitForAllPodsRunning(f.ClientSet, f.Namespace.Name, job.Name, parallelism)
Expect(err).NotTo(HaveOccurred())
By("delete a job")
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring job was deleted")
_, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).To(HaveOccurred())
Expect(errors.IsNotFound(err)).To(BeTrue())
})
It("should fail a job", func() {
By("Creating a job")
job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
By("Ensuring job was failed")
err = waitForJobFail(f.ClientSet, f.Namespace.Name, job.Name, 20*time.Second)
if err == wait.ErrWaitTimeout {
job, err = getJob(f.ClientSet, f.Namespace.Name, job.Name)
Expect(err).NotTo(HaveOccurred())
// the job stabilized and won't be synced until modification or full
// resync happens, we don't want to wait for the latter so we force
// sync modifying it
_, err = framework.UpdateJobWithRetries(f.ClientSet, f.Namespace.Name, job.Name, func(update *batch.Job) {
update.Spec.Parallelism = &completions
})
Expect(err).NotTo(HaveOccurred())
err = waitForJobFail(f.ClientSet, f.Namespace.Name, job.Name, jobTimeout)
}
Expect(err).NotTo(HaveOccurred())
})
})
// newTestJob returns a job which does one of several testing behaviors.
func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
job := &batch.Job{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: batch.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
ManualSelector: newBool(false),
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{jobSelectorKey: name},
},
Spec: v1.PodSpec{
RestartPolicy: rPol,
Volumes: []v1.Volume{
{
Name: "data",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{},
VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
},
},
},
},
},
},
},
}
switch behavior {
case "notTerminate":
job.Spec.Template.Spec.Containers[0].Command = []string{"sleep", "1000000"}
case "fail":
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 1"}
case "succeed":
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit 0"}
case "randomlySucceedOrFail":
// Bash's $RANDOM generates pseudorandom int in range 0 - 32767.
// Dividing by 16384 gives roughly 50/50 chance of success.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "exit $(( $RANDOM / 16384 ))"}
case "failOnce":
// Fail the first time the container of the pod is run, and
// succeed the second time. Checks for a file on the emptyDir.
// If present, succeed. If not, create it but fail.
// Note that this cannot be used with RestartPolicyNever because
// it always fails the first time for a pod.
job.Spec.Template.Spec.Containers[0].Command = []string{"/bin/sh", "-c", "if [[ -r /data/foo ]] ; then exit 0 ; else touch /data/foo ; exit 1 ; fi"}
}
return job
}
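// Illustrative usage (the job name "coin-flip" is hypothetical): a Job with
// parallelism 2 and 4 completions whose pods randomly exit 0 or 1 and, with
// RestartPolicyNever, are replaced rather than restarted in place:
//
//	job := newTestJob("randomlySucceedOrFail", "coin-flip", v1.RestartPolicyNever, 2, 4)
//	job, err := createJob(f.ClientSet, f.Namespace.Name, job)
//	framework.ExpectNoError(err)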
func getJob(c clientset.Interface, ns, name string) (*batch.Job, error) {
return c.Batch().Jobs(ns).Get(name, metav1.GetOptions{})
}
func createJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Batch().Jobs(ns).Create(job)
}
func updateJob(c clientset.Interface, ns string, job *batch.Job) (*batch.Job, error) {
return c.Batch().Jobs(ns).Update(job)
}
func deleteJob(c clientset.Interface, ns, name string) error {
return c.Batch().Jobs(ns).Delete(name, nil)
}
// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
count := int32(0)
for _, p := range pods.Items {
if p.Status.Phase == v1.PodRunning {
count++
}
}
return count == parallelism, nil
})
}
// Wait for job to reach completions.
func waitForJobFinish(c clientset.Interface, ns, jobName string, completions int32) error {
return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
return curr.Status.Succeeded == completions, nil
})
}
// Wait for job fail.
func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Duration) error {
return wait.Poll(framework.Poll, timeout, func() (bool, error) {
curr, err := c.Batch().Jobs(ns).Get(jobName, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, c := range curr.Status.Conditions {
if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
return true, nil
}
}
return false, nil
})
}
func newBool(val bool) *bool {
p := new(bool)
*p = val
return p
}

112
vendor/k8s.io/kubernetes/test/e2e/kibana_logging.go generated vendored Normal file
View file

@ -0,0 +1,112 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var _ = framework.KubeDescribe("Kibana Logging Instances Is Alive [Feature:Elasticsearch]", func() {
f := framework.NewDefaultFramework("kibana-logging")
BeforeEach(func() {
// TODO: For now assume we are only testing cluster logging with Elasticsearch
// and Kibana on GCE. Once we are sure that Elasticsearch and Kibana cluster level logging
// works for other providers we should widen the scope of this test.
framework.SkipUnlessProviderIs("gce")
})
It("should check that the Kibana logging instance is alive", func() {
ClusterLevelLoggingWithKibana(f)
})
})
const (
kibanaKey = "k8s-app"
kibanaValue = "kibana-logging"
)
// ClusterLevelLoggingWithKibana is an end-to-end test that checks to see if Kibana is alive.
func ClusterLevelLoggingWithKibana(f *framework.Framework) {
// graceTime is how long to keep retrying requests for status information.
const graceTime = 20 * time.Minute
// Check for the existence of the Kibana service.
By("Checking the Kibana service exists.")
s := f.ClientSet.Core().Services(api.NamespaceSystem)
// Make a few attempts to connect. This makes the test robust against
// being run as the first e2e test just after the e2e cluster has been created.
var err error
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
if _, err = s.Get("kibana-logging", metav1.GetOptions{}); err == nil {
break
}
framework.Logf("Attempt to check for the existence of the Kibana service failed after %v", time.Since(start))
}
Expect(err).NotTo(HaveOccurred())
// Wait for the Kibana pod(s) to enter the running state.
By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
options := v1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
err = framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)
Expect(err).NotTo(HaveOccurred())
}
By("Checking to make sure we get a response from the Kibana UI.")
err = nil
for start := time.Now(); time.Since(start) < graceTime; time.Sleep(5 * time.Second) {
proxyRequest, errProxy := framework.GetServicesProxyRequest(f.ClientSet, f.ClientSet.Core().RESTClient().Get())
if errProxy != nil {
framework.Logf("After %v failed to get services proxy request: %v", time.Since(start), errProxy)
err = errProxy
continue
}
ctx, cancel := context.WithTimeout(context.Background(), framework.SingleCallTimeout)
defer cancel()
// Query against the root URL for Kibana.
_, err = proxyRequest.Namespace(api.NamespaceSystem).
Context(ctx).
Name("kibana-logging").
DoRaw()
if err != nil {
if ctx.Err() != nil {
framework.Failf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
break
}
framework.Logf("After %v proxy call to kibana-logging failed: %v", time.Since(start), err)
continue
}
break
}
Expect(err).NotTo(HaveOccurred())
}

206
vendor/k8s.io/kubernetes/test/e2e/kube_proxy.go generated vendored Normal file
View file

@ -0,0 +1,206 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"encoding/json"
"fmt"
"math"
"strconv"
"strings"
"time"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/images/net/nat"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const kubeProxyE2eImage = "gcr.io/google_containers/e2e-net-amd64:1.0"
var _ = framework.KubeDescribe("Network", func() {
const (
testDaemonHttpPort = 11301
testDaemonTcpPort = 11302
timeoutSeconds = 10
postFinTimeoutSeconds = 5
)
fr := framework.NewDefaultFramework("network")
It("should set TCP CLOSE_WAIT timeout", func() {
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
ips := framework.CollectAddresses(nodes, v1.NodeInternalIP)
if len(nodes.Items) < 2 {
framework.Skipf(
"Test requires >= 2 Ready nodes, but there are only %v nodes",
len(nodes.Items))
}
type NodeInfo struct {
node *v1.Node
name string
nodeIp string
}
clientNodeInfo := NodeInfo{
node: &nodes.Items[0],
name: nodes.Items[0].Name,
nodeIp: ips[0],
}
serverNodeInfo := NodeInfo{
node: &nodes.Items[1],
name: nodes.Items[1].Name,
nodeIp: ips[1],
}
zero := int64(0)
clientPodSpec := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "e2e-net-client",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-client"},
},
Spec: v1.PodSpec{
NodeName: clientNodeInfo.name,
Containers: []v1.Container{
{
Name: "e2e-net-client",
Image: kubeProxyE2eImage,
ImagePullPolicy: "Always",
Command: []string{
"/net", "-serve", fmt.Sprintf("0.0.0.0:%d", testDaemonHttpPort),
},
},
},
TerminationGracePeriodSeconds: &zero,
},
}
serverPodSpec := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "e2e-net-server",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-server"},
},
Spec: v1.PodSpec{
NodeName: serverNodeInfo.name,
Containers: []v1.Container{
{
Name: "e2e-net-server",
Image: kubeProxyE2eImage,
ImagePullPolicy: "Always",
Command: []string{
"/net",
"-runner", "nat-closewait-server",
"-options",
fmt.Sprintf(`{"LocalAddr":"0.0.0.0:%v", "PostFinTimeoutSeconds":%v}`,
testDaemonTcpPort,
postFinTimeoutSeconds),
},
Ports: []v1.ContainerPort{
{
Name: "tcp",
ContainerPort: testDaemonTcpPort,
HostPort: testDaemonTcpPort,
},
},
},
},
TerminationGracePeriodSeconds: &zero,
},
}
By(fmt.Sprintf(
"Launching a server daemon on node %v (node ip: %v, image: %v)",
serverNodeInfo.name,
serverNodeInfo.nodeIp,
kubeProxyE2eImage))
fr.PodClient().CreateSync(serverPodSpec)
By(fmt.Sprintf(
"Launching a client daemon on node %v (node ip: %v, image: %v)",
clientNodeInfo.name,
clientNodeInfo.nodeIp,
kubeProxyE2eImage))
fr.PodClient().CreateSync(clientPodSpec)
By("Make client connect")
options := nat.CloseWaitClientOptions{
RemoteAddr: fmt.Sprintf("%v:%v",
serverNodeInfo.nodeIp, testDaemonTcpPort),
TimeoutSeconds: timeoutSeconds,
PostFinTimeoutSeconds: 0,
LeakConnection: true,
}
jsonBytes, err := json.Marshal(options)
framework.ExpectNoError(err)
cmd := fmt.Sprintf(
`curl -X POST http://localhost:%v/run/nat-closewait-client -d `+
`'%v' 2>/dev/null`,
testDaemonHttpPort,
string(jsonBytes))
framework.RunHostCmdOrDie(fr.Namespace.Name, "e2e-net-client", cmd)
<-time.After(time.Duration(1) * time.Second)
By("Checking /proc/net/nf_conntrack for the timeout")
// If test flakes occur here, then this check should be performed
// in a loop as there may be a race with the client connecting.
framework.IssueSSHCommandWithResult(
fmt.Sprintf("sudo cat /proc/net/ip_conntrack | grep 'dport=%v'",
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)
// Timeout in seconds is available as the third column from
// /proc/net/ip_conntrack.
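// A typical entry looks roughly like the following (addresses are illustrative
// and the exact format varies by kernel; shown only to motivate awk '{print $3}'):
//
//	tcp      6 3586 CLOSE_WAIT src=10.244.1.5 dst=10.244.2.3 sport=51234 dport=11302 ...
//
// where the third column (3586) is the remaining timeout in seconds.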
result, err := framework.IssueSSHCommandWithResult(
fmt.Sprintf(
"sudo cat /proc/net/ip_conntrack "+
"| grep 'CLOSE_WAIT.*dst=%v.*dport=%v' "+
"| tail -n 1"+
"| awk '{print $3}' ",
serverNodeInfo.nodeIp,
testDaemonTcpPort),
framework.TestContext.Provider,
clientNodeInfo.node)
framework.ExpectNoError(err)
timeoutSeconds, err := strconv.Atoi(strings.TrimSpace(result.Stdout))
framework.ExpectNoError(err)
// These must be synchronized from the default values set in
// pkg/apis/../defaults.go ConntrackTCPCloseWaitTimeout. The
// current defaults are hidden in the initialization code.
const epsilonSeconds = 60
const expectedTimeoutSeconds = 60 * 60
framework.Logf("conntrack entry timeout was: %v, expected: %v",
timeoutSeconds, expectedTimeoutSeconds)
Expect(math.Abs(float64(timeoutSeconds - expectedTimeoutSeconds))).Should(
BeNumerically("<", (epsilonSeconds)))
})
})

Some files were not shown because too many files have changed in this diff