/*
Copyright 2014 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package app implements a Server object for running the scheduler.
package app

import (
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/pprof"
	"os"
	goruntime "runtime"
	"strconv"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/healthz"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/leaderelection"
	"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
	"k8s.io/kubernetes/pkg/client/record"
	"k8s.io/kubernetes/pkg/client/restclient"
	"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
	"k8s.io/kubernetes/pkg/util/configz"
	"k8s.io/kubernetes/plugin/cmd/kube-scheduler/app/options"
	"k8s.io/kubernetes/plugin/pkg/scheduler"
	_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
	latestschedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api/latest"
	"k8s.io/kubernetes/plugin/pkg/scheduler/factory"

	"github.com/golang/glog"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
)

// NewSchedulerCommand creates a *cobra.Command object with default parameters.
func NewSchedulerCommand() *cobra.Command {
	s := options.NewSchedulerServer()
	s.AddFlags(pflag.CommandLine)
	cmd := &cobra.Command{
		Use: "kube-scheduler",
		Long: `The Kubernetes scheduler is a policy-rich, topology-aware,
workload-specific function that significantly impacts availability, performance,
and capacity. The scheduler needs to take into account individual and collective
resource requirements, quality of service requirements, hardware/software/policy
constraints, affinity and anti-affinity specifications, data locality, inter-workload
interference, deadlines, and so on. Workload-specific requirements will be exposed
through the API as necessary.`,
		Run: func(cmd *cobra.Command, args []string) {
		},
	}

	return cmd
}

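// A main package typically wires these pieces together roughly as follows
// (illustrative sketch, not part of this file; flag parsing elided):
//
//	s := options.NewSchedulerServer()
//	s.AddFlags(pflag.CommandLine)
//	// ... parse flags ...
//	if err := app.Run(s); err != nil {
//		glog.Fatalf("%v", err)
//	}
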
// Run runs the specified SchedulerServer. This should never exit.
func Run(s *options.SchedulerServer) error {
	kubecli, err := createClient(s)
	if err != nil {
		return fmt.Errorf("unable to create kube client: %v", err)
	}

	config, err := createConfig(s, kubecli)
	if err != nil {
		return fmt.Errorf("failed to create scheduler configuration: %v", err)
	}

	sched := scheduler.New(config)

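	// Serve healthz, metrics, configz, and optional profiling endpoints
	// regardless of whether this instance becomes the leader.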
	go startHTTP(s)

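	// run starts the scheduler and then blocks forever. With leader election
	// enabled it is invoked only once this instance acquires the lock.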
	run := func(_ <-chan struct{}) {
		sched.Run()
		select {}
	}

	if !s.LeaderElection.LeaderElect {
		run(nil)
		panic("unreachable")
	}

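	// Leader election is enabled: identify this instance by hostname and
	// compete for the kube-scheduler Endpoints lock in kube-system.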
	id, err := os.Hostname()
	if err != nil {
		return fmt.Errorf("unable to get hostname: %v", err)
	}

	// TODO: enable other lock types
	rl := &resourcelock.EndpointsLock{
		EndpointsMeta: v1.ObjectMeta{
			Namespace: "kube-system",
			Name:      "kube-scheduler",
		},
		Client: kubecli,
		LockConfig: resourcelock.ResourceLockConfig{
			Identity:      id,
			EventRecorder: config.Recorder,
		},
	}

	leaderelection.RunOrDie(leaderelection.LeaderElectionConfig{
		Lock:          rl,
		LeaseDuration: s.LeaderElection.LeaseDuration.Duration,
		RenewDeadline: s.LeaderElection.RenewDeadline.Duration,
		RetryPeriod:   s.LeaderElection.RetryPeriod.Duration,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: run,
			OnStoppedLeading: func() {
				glog.Fatalf("lost master")
			},
		},
	})

	panic("unreachable")
}

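// startHTTP serves health checks, the componentconfig view, Prometheus metrics,
// and (when enabled) pprof profiling endpoints on the configured address and
// port. It blocks in ListenAndServe and is meant to run in its own goroutine.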
func startHTTP(s *options.SchedulerServer) {
	mux := http.NewServeMux()
	healthz.InstallHandler(mux)
	if s.EnableProfiling {
		mux.HandleFunc("/debug/pprof/", pprof.Index)
		mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
		mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
		if s.EnableContentionProfiling {
			goruntime.SetBlockProfileRate(1)
		}
	}
	if c, err := configz.New("componentconfig"); err == nil {
		c.Set(s.KubeSchedulerConfiguration)
	} else {
		glog.Errorf("unable to register configz: %s", err)
	}
	configz.InstallHandler(mux)
	mux.Handle("/metrics", prometheus.Handler())

	server := &http.Server{
		Addr:    net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
		Handler: mux,
	}
	glog.Fatal(server.ListenAndServe())
}

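// createClient builds a Kubernetes clientset from the configured master URL and
// kubeconfig path, applying the content type and QPS/burst overrides from the
// server options.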
func createClient(s *options.SchedulerServer) (*clientset.Clientset, error) {
	kubeconfig, err := clientcmd.BuildConfigFromFlags(s.Master, s.Kubeconfig)
	if err != nil {
		return nil, fmt.Errorf("unable to build config from flags: %v", err)
	}

	kubeconfig.ContentType = s.ContentType
	// Override kubeconfig qps/burst settings from flags
	kubeconfig.QPS = s.KubeAPIQPS
	kubeconfig.Burst = int(s.KubeAPIBurst)

	cli, err := clientset.NewForConfig(restclient.AddUserAgent(kubeconfig, "leader-election"))
	if err != nil {
		return nil, fmt.Errorf("invalid API configuration: %v", err)
	}
	return cli, nil
}

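// createConfig builds a scheduler.Config from the policy config file when one
// is provided, and otherwise from the configured algorithm provider, in which
// case it also wires up event broadcasting for the scheduler.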
func createConfig(s *options.SchedulerServer, kubecli *clientset.Clientset) (*scheduler.Config, error) {
	configFactory := factory.NewConfigFactory(kubecli, s.SchedulerName, s.HardPodAffinitySymmetricWeight, s.FailureDomains)
	if _, err := os.Stat(s.PolicyConfigFile); err == nil {
		var (
			policy     schedulerapi.Policy
			configData []byte
		)
		configData, err := ioutil.ReadFile(s.PolicyConfigFile)
		if err != nil {
			return nil, fmt.Errorf("unable to read policy config: %v", err)
		}
		if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
			return nil, fmt.Errorf("invalid configuration: %v", err)
		}
		return configFactory.CreateFromConfig(policy)
	}

	// if the config file isn't provided, use the specified (or default) provider
	config, err := configFactory.CreateFromProvider(s.AlgorithmProvider)
	if err != nil {
		return nil, err
	}

	eventBroadcaster := record.NewBroadcaster()
	config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubecli.Core().Events("")})

	return config, nil
}