Switch to github.com/golang/dep for vendoring

Signed-off-by: Mrunal Patel <mrunalp@gmail.com>
Mrunal Patel 2017-01-31 16:45:59 -08:00
parent d6ab91be27
commit 8e5b17cf13
15431 changed files with 3971413 additions and 8881 deletions

BUILD
@@ -0,0 +1,59 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

go_library(
    name = "go_default_library",
    srcs = ["util.go"],
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/api/v1:go_default_library",
        "//pkg/client/clientset_generated/clientset:go_default_library",
        "//pkg/client/clientset_generated/clientset/typed/core/v1:go_default_library",
        "//pkg/client/record:go_default_library",
        "//pkg/client/restclient:go_default_library",
        "//plugin/pkg/scheduler:go_default_library",
        "//plugin/pkg/scheduler/algorithmprovider:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//test/integration/framework:go_default_library",
        "//vendor:github.com/golang/glog",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "scheduler_bench_test.go",
        "scheduler_test.go",
    ],
    library = ":go_default_library",
    tags = ["automanaged"],
    deps = [
        "//pkg/api/v1:go_default_library",
        "//plugin/pkg/scheduler/factory:go_default_library",
        "//test/integration/framework:go_default_library",
        "//test/utils:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:github.com/renstrom/dedent",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

README.md
@@ -0,0 +1,45 @@
Scheduler Performance Test
======
Motivation
------
We already have a performance testing system -- Kubemark. However, Kubemark requires setting up and bootstrapping a whole cluster, which takes a lot of time.

We want a standard way to reproduce scheduling latency metrics and to benchmark the scheduler as simply and quickly as possible. We have the following goals:

- Save time on testing
  - The test and benchmark can be run in a single box.
    We only set up the components necessary for scheduling, without booting up a cluster.
- Profile runtime metrics to find bottlenecks
  - Write scheduler integration tests that focus on performance measurement.
    Take advantage of Go profiling tools to collect fine-grained metrics,
    such as CPU, memory and block profiles.
- Reproduce test results easily
  - We want a known place to run performance-related tests for the scheduler.
    Developers should be able to run one script to collect all the information they need.

Currently the test suite has the following:

- density test (by adding a new Go test)
  - schedules 30k pods on 1000 (fake) nodes and 3k pods on 100 (fake) nodes
  - prints the scheduling rate every second
  - shows how the rate changes with the number of scheduled pods
- benchmark
  - uses `go test -bench` and reports nanoseconds/op.
  - schedules b.N pods when the cluster has N nodes and P scheduled pods. Since it takes a relatively long time to finish one round, b.N is kept small: 10 to 100.
How To Run
------
```
cd kubernetes/test/integration/scheduler_perf
./test-performance.sh
```
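
By default the script runs only the density tests. A minimal sketch of exercising the benchmark mode and inspecting its CPU profile with standard Go tooling (the `RUN_BENCHMARK` switch and the `perf.test`/`prof.out` files come from `test-performance.sh`, shown later in this commit):

```
# Also run the benchmark suite (compiles perf.test and writes prof.out).
RUN_BENCHMARK=true ./test-performance.sh

# Inspect the collected CPU profile with the standard Go tooling.
go tool pprof perf.test prof.out
```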
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/test/integration/scheduler_perf/README.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->

scheduler_bench_test.go
@@ -0,0 +1,100 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"testing"
	"time"

	"k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
)
// BenchmarkScheduling100Nodes0Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 0 scheduled pods
func BenchmarkScheduling100Nodes0Pods(b *testing.B) {
	benchmarkScheduling(100, 0, b)
}

// BenchmarkScheduling100Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 100 nodes and 1000 scheduled pods
func BenchmarkScheduling100Nodes1000Pods(b *testing.B) {
	benchmarkScheduling(100, 1000, b)
}

// BenchmarkScheduling1000Nodes0Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 0 scheduled pods
func BenchmarkScheduling1000Nodes0Pods(b *testing.B) {
	benchmarkScheduling(1000, 0, b)
}

// BenchmarkScheduling1000Nodes1000Pods benchmarks the scheduling rate
// when the cluster has 1000 nodes and 1000 scheduled pods
func BenchmarkScheduling1000Nodes1000Pods(b *testing.B) {
	benchmarkScheduling(1000, 1000, b)
}
// benchmarkScheduling benchmarks the scheduling rate with a specific number of nodes
// and a specific number of pods already scheduled. Since one operation takes a
// relatively long time, b.N should be small: 10 - 100.
func benchmarkScheduling(numNodes, numScheduledPods int, b *testing.B) {
	schedulerConfigFactory, finalFunc := mustSetupScheduler()
	defer finalFunc()
	c := schedulerConfigFactory.Client

	nodePreparer := framework.NewIntegrationTestNodePreparer(
		c,
		[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
		"scheduler-perf-",
	)
	if err := nodePreparer.PrepareNodes(); err != nil {
		glog.Fatalf("%v", err)
	}
	defer nodePreparer.CleanupNodes()

	config := testutils.NewTestPodCreatorConfig()
	config.AddStrategy("sched-test", numScheduledPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
	podCreator := testutils.NewTestPodCreator(c, config)
	podCreator.CreatePods()

	// Wait until all the pre-scheduled pods have actually been scheduled.
	for {
		scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
		if len(scheduled) >= numScheduledPods {
			break
		}
		time.Sleep(1 * time.Second)
	}

	// start benchmark
	b.ResetTimer()
	config = testutils.NewTestPodCreatorConfig()
	config.AddStrategy("sched-test", b.N, testutils.NewSimpleWithControllerCreatePodStrategy("rc2"))
	podCreator = testutils.NewTestPodCreator(c, config)
	podCreator.CreatePods()
	for {
		// This can potentially affect performance of scheduler, since List() is done under mutex.
		// TODO: Setup watch on apiserver and wait until all pods scheduled.
		scheduled := schedulerConfigFactory.ScheduledPodLister.Indexer.List()
		if len(scheduled) >= numScheduledPods+b.N {
			break
		}
		// Note: This might introduce a slight deviation in the accuracy of benchmark results.
		// Since the total amount of time is relatively large, it might not be a concern.
		time.Sleep(100 * time.Millisecond)
	}
}

scheduler_test.go
@@ -0,0 +1,241 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"fmt"
	"math"
	"testing"
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
	testutils "k8s.io/kubernetes/test/utils"

	"github.com/golang/glog"
	"github.com/renstrom/dedent"
)

const (
	threshold3K  = 100
	threshold30K = 30
	threshold60K = 30
)
// TestSchedule100Node3KPods schedules 3k pods on 100 nodes.
func TestSchedule100Node3KPods(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping because we want to run short tests")
	}

	config := defaultSchedulerBenchmarkConfig(100, 3000)
	if min := schedulePods(config); min < threshold3K {
		t.Errorf("Too small pod scheduling throughput for 3k pods. Expected %v got %v", threshold3K, min)
	} else {
		fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
	}
}
// TestSchedule100Node3KNodeAffinityPods schedules 3k pods using Node affinity on 100 nodes.
func TestSchedule100Node3KNodeAffinityPods(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping because we want to run short tests")
	}

	config := baseConfig()
	config.numNodes = 100
	config.numPods = 3000

	// number of Node-Pod sets with Pods NodeAffinity matching given Nodes.
	numGroups := 10
	nodeAffinityKey := "kubernetes.io/sched-perf-node-affinity"

	nodeStrategies := make([]testutils.CountToStrategy, 0, numGroups)
	for i := 0; i < numGroups; i++ {
		nodeStrategies = append(nodeStrategies, testutils.CountToStrategy{
			Count:    config.numNodes / numGroups,
			Strategy: testutils.NewLabelNodePrepareStrategy(nodeAffinityKey, fmt.Sprintf("%v", i)),
		})
	}
	config.nodePreparer = framework.NewIntegrationTestNodePreparer(
		config.schedulerConfigFactory.Client,
		nodeStrategies,
		"scheduler-perf-",
	)

	affinityTemplate := dedent.Dedent(`
		{
			"nodeAffinity": {
				"requiredDuringSchedulingIgnoredDuringExecution": {
					"nodeSelectorTerms": [{
						"matchExpressions": [{
							"key": "` + nodeAffinityKey + `",
							"operator": "In",
							"values": ["%v"]
						}]
					}]
				}
			}
		}`)

	podCreatorConfig := testutils.NewTestPodCreatorConfig()
	for i := 0; i < numGroups; i++ {
		podCreatorConfig.AddStrategy("sched-perf-node-affinity", config.numPods/numGroups,
			testutils.NewCustomCreatePodStrategy(&v1.Pod{
				ObjectMeta: v1.ObjectMeta{
					GenerateName: "sched-perf-node-affinity-pod-",
					Annotations:  map[string]string{v1.AffinityAnnotationKey: fmt.Sprintf(affinityTemplate, i)},
				},
				Spec: testutils.MakePodSpec(),
			}),
		)
	}
	config.podCreator = testutils.NewTestPodCreator(config.schedulerConfigFactory.Client, podCreatorConfig)

	if min := schedulePods(config); min < threshold30K {
		t.Errorf("Too small pod scheduling throughput for 3k pods. Expected %v got %v", threshold30K, min)
	} else {
		fmt.Printf("Minimal observed throughput for 3k pod test: %v\n", min)
	}
}
// TestSchedule1000Node30KPods schedules 30k pods on 1000 nodes.
func TestSchedule1000Node30KPods(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping because we want to run short tests")
	}

	config := defaultSchedulerBenchmarkConfig(1000, 30000)
	if min := schedulePods(config); min < threshold30K {
		t.Errorf("Too small pod scheduling throughput for 30k pods. Expected %v got %v", threshold30K, min)
	} else {
		fmt.Printf("Minimal observed throughput for 30k pod test: %v\n", min)
	}
}
// TestSchedule2000Node60KPods schedules 60k pods on 2000 nodes.
// This test won't fit in the normal 10 minute time window.
// func TestSchedule2000Node60KPods(t *testing.T) {
// 	if testing.Short() {
// 		t.Skip("Skipping because we want to run short tests")
// 	}
// 	config := defaultSchedulerBenchmarkConfig(2000, 60000)
// 	if min := schedulePods(config); min < threshold60K {
// 		t.Errorf("Too small pod scheduling throughput for 60k pods. Expected %v got %v", threshold60K, min)
// 	} else {
// 		fmt.Printf("Minimal observed throughput for 60k pod test: %v\n", min)
// 	}
// }
type testConfig struct {
	numPods                int
	numNodes               int
	nodePreparer           testutils.TestNodePreparer
	podCreator             *testutils.TestPodCreator
	schedulerConfigFactory *factory.ConfigFactory
	destroyFunc            func()
}

func baseConfig() *testConfig {
	schedulerConfigFactory, destroyFunc := mustSetupScheduler()
	return &testConfig{
		schedulerConfigFactory: schedulerConfigFactory,
		destroyFunc:            destroyFunc,
	}
}

func defaultSchedulerBenchmarkConfig(numNodes, numPods int) *testConfig {
	baseConfig := baseConfig()

	nodePreparer := framework.NewIntegrationTestNodePreparer(
		baseConfig.schedulerConfigFactory.Client,
		[]testutils.CountToStrategy{{Count: numNodes, Strategy: &testutils.TrivialNodePrepareStrategy{}}},
		"scheduler-perf-",
	)

	config := testutils.NewTestPodCreatorConfig()
	config.AddStrategy("sched-test", numPods, testutils.NewSimpleWithControllerCreatePodStrategy("rc1"))
	podCreator := testutils.NewTestPodCreator(baseConfig.schedulerConfigFactory.Client, config)

	baseConfig.nodePreparer = nodePreparer
	baseConfig.podCreator = podCreator
	baseConfig.numPods = numPods
	baseConfig.numNodes = numNodes
	return baseConfig
}
// schedulePods schedules a specific number of pods on a specific number of nodes.
// This is used to learn the scheduling throughput on various
// sizes of cluster and how it changes as more and more pods are scheduled.
// It won't stop until all pods are scheduled.
// It returns the minimum throughput observed over the whole run.
func schedulePods(config *testConfig) int32 {
	defer config.destroyFunc()
	if err := config.nodePreparer.PrepareNodes(); err != nil {
		glog.Fatalf("%v", err)
	}
	defer config.nodePreparer.CleanupNodes()
	config.podCreator.CreatePods()

	prev := 0
	// On startup there may be a latent period where NO scheduling occurs (qps = 0),
	// and we are interested in low scheduling rates (i.e. qps = 2), so start from the maximum.
	minQps := int32(math.MaxInt32)
	start := time.Now()

	// Bake in time for the first pod scheduling event.
	for {
		time.Sleep(50 * time.Millisecond)
		scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()
		// 30,000 pods -> wait till at least 300 are scheduled to start measuring.
		// TODO: Find out why sometimes there may be scheduling blips in the beginning.
		if len(scheduled) > config.numPods/100 {
			break
		}
	}

	// Map of observed QPS values to their frequency, useful for debugging tests.
	qpsStats := map[int]int{}

	// Now that scheduling has started, take the pulse on how many pods are scheduled per second.
	for {
		// This can potentially affect performance of scheduler, since List() is done under mutex.
		// Listing 10000 pods is an expensive operation, so running it frequently may impact scheduler.
		// TODO: Setup watch on apiserver and wait until all pods scheduled.
		scheduled := config.schedulerConfigFactory.ScheduledPodLister.Indexer.List()

		// We are done once all pods are scheduled; return the worst-case (minimum)
		// rate observed during the run. Note this should never be low due to
		// cold start, since scheduling time was baked in above.
		if len(scheduled) >= config.numPods {
			fmt.Printf("Scheduled %v Pods in %v seconds (%v per second on average). min QPS was %v\n",
				config.numPods, int(time.Since(start)/time.Second), config.numPods/int(time.Since(start)/time.Second), minQps)
			return minQps
		}

		// There's no point in printing the rate for the last iteration, as its value is random.
		qps := len(scheduled) - prev
		qpsStats[qps] += 1
		if int32(qps) < minQps {
			minQps = int32(qps)
		}
		fmt.Printf("%ds\trate: %d\ttotal: %d (qps frequency: %v)\n", time.Since(start)/time.Second, qps, len(scheduled), qpsStats)
		prev = len(scheduled)
		time.Sleep(1 * time.Second)
	}
}

test-performance.sh
@@ -0,0 +1,52 @@
#!/usr/bin/env bash
# Copyright 2014 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -o errexit
set -o nounset
set -o pipefail

KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../../../
source "${KUBE_ROOT}/hack/lib/init.sh"

kube::golang::setup_env

DIR_BASENAME=$(dirname "${BASH_SOURCE}")
pushd "${DIR_BASENAME}"

cleanup() {
  popd 2> /dev/null
  kube::etcd::cleanup
  kube::log::status "performance test cleanup complete"
}

trap cleanup EXIT

kube::etcd::start

# We use the benchmark suite for profiling, because it only runs a few pods
# and should therefore have less variance.
if ${RUN_BENCHMARK:-false}; then
  kube::log::status "performance test (benchmark) compiling"
  go test -c -o "perf.test"

  kube::log::status "performance test (benchmark) start"
  "./perf.test" -test.bench=. -test.run=xxxx -test.cpuprofile=prof.out -test.short=false
  kube::log::status "...benchmark tests finished"
fi

# Run the density tests. They might take a long time.
kube::log::status "performance test (density) start"
go test -test.run=. -test.timeout=60m -test.short=false
kube::log::status "...density tests finished"

util.go
@@ -0,0 +1,78 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package benchmark

import (
	"net/http"
	"net/http/httptest"

	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"
	"k8s.io/kubernetes/test/integration/framework"
)
// mustSetupScheduler starts the following components:
// - k8s api server (a.k.a. master)
// - scheduler
// It returns the scheduler config factory and a destroyFunc which should be
// used to free the resources after the test is finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
func mustSetupScheduler() (schedulerConfigFactory *factory.ConfigFactory, destroyFunc func()) {
	h := &framework.MasterHolder{Initialized: make(chan struct{})}
	s := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
		<-h.Initialized
		h.M.GenericAPIServer.Handler.ServeHTTP(w, req)
	}))
	framework.RunAMasterUsingServer(framework.NewIntegrationTestMasterConfig(), s, h)

	clientSet := clientset.NewForConfigOrDie(&restclient.Config{
		Host:          s.URL,
		ContentConfig: restclient.ContentConfig{GroupVersion: &api.Registry.GroupOrDie(v1.GroupName).GroupVersion},
		QPS:           5000.0,
		Burst:         5000,
	})

	schedulerConfigFactory = factory.NewConfigFactory(clientSet, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
	schedulerConfig, err := schedulerConfigFactory.Create()
	if err != nil {
		panic("Couldn't create scheduler config")
	}
	eventBroadcaster := record.NewBroadcaster()
	schedulerConfig.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"})
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: clientSet.Core().Events("")})
	scheduler.New(schedulerConfig).Run()

	destroyFunc = func() {
		glog.Infof("destroying")
		close(schedulerConfig.StopEverything)
		s.Close()
		glog.Infof("destroyed")
	}
	return
}