Vendor: Update k8s version
Signed-off-by: Michał Żyłowski <michal.zylowski@intel.com>
parent dfa93414c5
commit 52baf68d50
3756 changed files with 113013 additions and 92675 deletions

2  vendor/k8s.io/kubernetes/cmd/kubeadm/app/BUILD  generated vendored
@@ -32,10 +32,12 @@ filegroup(
":package-srcs",
"//cmd/kubeadm/app/apis/kubeadm:all-srcs",
"//cmd/kubeadm/app/cmd:all-srcs",
"//cmd/kubeadm/app/constants:all-srcs",
"//cmd/kubeadm/app/discovery:all-srcs",
"//cmd/kubeadm/app/images:all-srcs",
"//cmd/kubeadm/app/master:all-srcs",
"//cmd/kubeadm/app/node:all-srcs",
"//cmd/kubeadm/app/phases/apiconfig:all-srcs",
"//cmd/kubeadm/app/phases/certs:all-srcs",
"//cmd/kubeadm/app/phases/kubeconfig:all-srcs",
"//cmd/kubeadm/app/preflight:all-srcs",

1  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/BUILD  generated vendored
@@ -17,7 +17,6 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",

5  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/BUILD  generated vendored
@@ -17,7 +17,10 @@ go_library(
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//pkg/apimachinery/announced:go_default_library",
"//pkg/api:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apimachinery/announced",
"//vendor:k8s.io/apimachinery/pkg/apimachinery/registered",
"//vendor:k8s.io/apimachinery/pkg/runtime",
],
)

12  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/install/install.go  generated vendored
@@ -17,12 +17,20 @@ limitations under the License.
package install

import (
"k8s.io/apimachinery/pkg/apimachinery/announced"
"k8s.io/apimachinery/pkg/apimachinery/registered"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/pkg/apimachinery/announced"
"k8s.io/kubernetes/pkg/api"
)

func init() {
Install(api.GroupFactoryRegistry, api.Registry, api.Scheme)
}

// Install registers the API group and adds types to a scheme
func Install(groupFactoryRegistry announced.APIGroupFactoryRegistry, registry *registered.APIRegistrationManager, scheme *runtime.Scheme) {
if err := announced.NewGroupMetaFactory(
&announced.GroupMetaFactoryArgs{
GroupName: kubeadm.GroupName,
@@ -33,7 +41,7 @@ func init() {
announced.VersionToSchemeFunc{
v1alpha1.SchemeGroupVersion.Version: v1alpha1.AddToScheme,
},
).Announce().RegisterAndEnable(); err != nil {
).Announce(groupFactoryRegistry).RegisterAndEnable(registry, scheme); err != nil {
panic(err)
}
}

5  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/register.go  generated vendored
@@ -17,10 +17,8 @@ limitations under the License.
package kubeadm

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api"
)

// GroupName is the group name use in this package
@@ -49,9 +47,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&MasterConfiguration{},
&NodeConfiguration{},
&ClusterInfo{},
&api.ListOptions{},
&api.DeleteOptions{},
&metav1.ExportOptions{},
)
return nil
}

1  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/types.go  generated vendored
@@ -39,6 +39,7 @@ type MasterConfiguration struct {
Networking Networking
KubernetesVersion string
CloudProvider string
AuthorizationMode string
}

type API struct {

1  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/BUILD  generated vendored
@@ -18,7 +18,6 @@ go_library(
],
tags = ["automanaged"],
deps = [
"//pkg/api/v1:go_default_library",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/runtime/schema",

19  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/defaults.go  generated vendored
@@ -16,17 +16,18 @@ limitations under the License.

package v1alpha1

import (
"k8s.io/apimachinery/pkg/runtime"
)
import "k8s.io/apimachinery/pkg/runtime"

const (
DefaultServiceDNSDomain = "cluster.local"
DefaultServicesSubnet = "10.96.0.0/12"
DefaultKubernetesVersion = "stable"
DefaultKubernetesFallbackVersion = "v1.5.0"
DefaultServiceDNSDomain = "cluster.local"
DefaultServicesSubnet = "10.96.0.0/12"
DefaultKubernetesVersion = "stable"
// This is only for clusters without internet, were the latest stable version can't be determined
DefaultKubernetesFallbackVersion = "v1.5.2"
DefaultAPIBindPort = 6443
DefaultDiscoveryBindPort = 9898
// TODO: Default this to RBAC when DefaultKubernetesFallbackVersion is v1.6-something
DefaultAuthorizationMode = "AlwaysAllow"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
@@ -56,4 +57,8 @@ func SetDefaults_MasterConfiguration(obj *MasterConfiguration) {
if obj.Discovery.Token == nil && obj.Discovery.File == nil && obj.Discovery.HTTPS == nil {
obj.Discovery.Token = &TokenDiscovery{}
}

if obj.AuthorizationMode == "" {
obj.AuthorizationMode = DefaultAuthorizationMode
}
}

5  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/register.go  generated vendored
@@ -20,7 +20,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/api/v1"
)

// GroupName is the group name use in this package
@@ -49,10 +48,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&MasterConfiguration{},
&NodeConfiguration{},
&ClusterInfo{},
&v1.ListOptions{},
&v1.DeleteOptions{},
&metav1.ExportOptions{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

1  vendor/k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1/types.go  generated vendored
@@ -29,6 +29,7 @@ type MasterConfiguration struct {
Networking Networking `json:"networking"`
KubernetesVersion string `json:"kubernetesVersion"`
CloudProvider string `json:"cloudProvider"`
AuthorizationMode string `json:"authorizationMode"`
}

type API struct {

14  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/BUILD  generated vendored
@@ -12,6 +12,7 @@ go_library(
name = "go_default_library",
srcs = [
"cmd.go",
"defaults.go",
"init.go",
"join.go",
"reset.go",
@@ -24,25 +25,29 @@ go_library(
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/validation:go_default_library",
"//cmd/kubeadm/app/cmd/flags:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/discovery:go_default_library",
"//cmd/kubeadm/app/master:go_default_library",
"//cmd/kubeadm/app/node:go_default_library",
"//cmd/kubeadm/app/phases/apiconfig:go_default_library",
"//cmd/kubeadm/app/phases/certs:go_default_library",
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
"//cmd/kubeadm/app/preflight:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/fields:go_default_library",
"//pkg/kubectl:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/util/flag:go_default_library",
"//pkg/util/initsystem:go_default_library",
"//pkg/version:go_default_library",
"//vendor:github.com/blang/semver",
"//vendor:github.com/renstrom/dedent",
"//vendor:github.com/spf13/cobra",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/fields",
"//vendor:k8s.io/apimachinery/pkg/runtime",
"//vendor:k8s.io/apimachinery/pkg/util/net",
"//vendor:k8s.io/client-go/util/cert",
],
)

@@ -54,7 +59,10 @@ go_test(
],
library = ":go_default_library",
tags = ["automanaged"],
deps = ["//cmd/kubeadm/app/preflight:go_default_library"],
deps = [
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
"//cmd/kubeadm/app/preflight:go_default_library",
],
)

filegroup(

94  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/defaults.go  generated vendored  Normal file
@@ -0,0 +1,94 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
"fmt"
"strconv"

netutil "k8s.io/apimachinery/pkg/util/net"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"

"github.com/blang/semver"
)

var (
// Maximum version when using AllowAll as the default authz mode. Everything above this will use RBAC by default.
allowAllMaxVersion = semver.MustParse("1.6.0-alpha.0")
)

func setInitDynamicDefaults(cfg *kubeadmapi.MasterConfiguration) error {
// Auto-detect the IP
if len(cfg.API.AdvertiseAddresses) == 0 {
ip, err := netutil.ChooseHostInterface()
if err != nil {
return err
}
cfg.API.AdvertiseAddresses = []string{ip.String()}
}

// Validate version argument
ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
if err != nil {
if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion {
return err
} else {
ver = kubeadmapiext.DefaultKubernetesFallbackVersion
}
}
cfg.KubernetesVersion = ver
fmt.Println("[init] Using Kubernetes version:", ver)

// Omit the "v" in the beginning, otherwise semver will fail
// If the version is newer than the specified version, RBAC v1beta1 support is enabled in the apiserver so we can default to RBAC
k8sVersion, err := semver.Parse(cfg.KubernetesVersion[1:])
if k8sVersion.GT(allowAllMaxVersion) {
cfg.AuthorizationMode = "RBAC"
}

fmt.Println("[init] Using Authorization mode:", cfg.AuthorizationMode)

// Warn about the limitations with the current cloudprovider solution.
if cfg.CloudProvider != "" {
fmt.Println("[init] WARNING: For cloudprovider integrations to work --cloud-provider must be set for all kubelets in the cluster.")
fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)")
}

// Validate token if any, otherwise generate
if cfg.Discovery.Token != nil {
if cfg.Discovery.Token.ID != "" && cfg.Discovery.Token.Secret != "" {
fmt.Printf("[init] A token has been provided, validating [%s]\n", kubeadmutil.BearerToken(cfg.Discovery.Token))
if valid, err := kubeadmutil.ValidateToken(cfg.Discovery.Token); valid == false {
return err
}
} else {
fmt.Println("[init] A token has not been provided, generating one")
if err := kubeadmutil.GenerateToken(cfg.Discovery.Token); err != nil {
return err
}
}

// If there aren't any addresses specified, default to the first advertised address which can be user-provided or the default network interface's IP address
if len(cfg.Discovery.Token.Addresses) == 0 {
cfg.Discovery.Token.Addresses = []string{cfg.API.AdvertiseAddresses[0] + ":" + strconv.Itoa(kubeadmapiext.DefaultDiscoveryBindPort)}
}
}

return nil
}

121  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/init.go  generated vendored
@@ -21,23 +21,20 @@ import (
"io"
"io/ioutil"
"path"
"strconv"

"github.com/renstrom/dedent"
"github.com/spf13/cobra"

"k8s.io/apimachinery/pkg/runtime"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
"k8s.io/kubernetes/cmd/kubeadm/app/cmd/flags"
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"

"k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig"
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"

"k8s.io/apimachinery/pkg/runtime"
netutil "k8s.io/apimachinery/pkg/util/net"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/api"
@@ -66,11 +63,12 @@ func NewCmdInit(out io.Writer) *cobra.Command {

var cfgPath string
var skipPreFlight bool
var selfHosted bool
cmd := &cobra.Command{
Use: "init",
Short: "Run this in order to set up the Kubernetes master",
Run: func(cmd *cobra.Command, args []string) {
i, err := NewInit(cfgPath, &cfg, skipPreFlight)
i, err := NewInit(cfgPath, &cfg, skipPreFlight, selfHosted)
kubeadmutil.CheckErr(err)
kubeadmutil.CheckErr(i.Validate())
kubeadmutil.CheckErr(i.Run(out))
@@ -115,7 +113,7 @@ func NewCmdInit(out io.Writer) *cobra.Command {

cmd.PersistentFlags().BoolVar(
&skipPreFlight, "skip-preflight-checks", skipPreFlight,
"skip preflight checks normally run before modifying the system",
"Skip preflight checks normally run before modifying the system",
)

cmd.PersistentFlags().Var(
@@ -123,14 +121,15 @@ func NewCmdInit(out io.Writer) *cobra.Command {
"The discovery method kubeadm will use for connecting nodes to the master",
)

cmd.PersistentFlags().BoolVar(
&selfHosted, "self-hosted", selfHosted,
"Enable self-hosted control plane",
)

return cmd
}

type Init struct {
cfg *kubeadmapi.MasterConfiguration
}

func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool) (*Init, error) {
func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight bool, selfHosted bool) (*Init, error) {

fmt.Println("[kubeadm] WARNING: kubeadm is in alpha, please do not use it for production clusters.")

@@ -144,13 +143,10 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
}
}

// Auto-detect the IP
if len(cfg.API.AdvertiseAddresses) == 0 {
ip, err := netutil.ChooseHostInterface()
if err != nil {
return nil, err
}
cfg.API.AdvertiseAddresses = []string{ip.String()}
// Set defaults dynamically that the API group defaulting can't (by fetching information from the internet, looking up network interfaces, etc.)
err := setInitDynamicDefaults(cfg)
if err != nil {
return nil, err
}

if !skipPreFlight {
@@ -172,27 +168,15 @@ func NewInit(cfgPath string, cfg *kubeadmapi.MasterConfiguration, skipPreFlight
// Try to start the kubelet service in case it's inactive
preflight.TryStartKubelet()

// validate version argument
ver, err := kubeadmutil.KubernetesReleaseVersion(cfg.KubernetesVersion)
if err != nil {
if cfg.KubernetesVersion != kubeadmapiext.DefaultKubernetesVersion {
return nil, err
} else {
ver = kubeadmapiext.DefaultKubernetesFallbackVersion
}
}
cfg.KubernetesVersion = ver
fmt.Println("[init] Using Kubernetes version:", ver)

// Warn about the limitations with the current cloudprovider solution.
if cfg.CloudProvider != "" {
fmt.Println("WARNING: For cloudprovider integrations to work --cloud-provider must be set for all kubelets in the cluster.")
fmt.Println("\t(/etc/systemd/system/kubelet.service.d/10-kubeadm.conf should be edited for this purpose)")
}

return &Init{cfg: cfg}, nil
return &Init{cfg: cfg, selfHosted: selfHosted}, nil
}

type Init struct {
cfg *kubeadmapi.MasterConfiguration
selfHosted bool
}

// Validate validates configuration passed to "kubeadm init"
func (i *Init) Validate() error {
return validation.ValidateMasterConfiguration(i.cfg).ToAggregate()
}
@@ -201,32 +185,11 @@ func (i *Init) Validate() error {
func (i *Init) Run(out io.Writer) error {

// PHASE 1: Generate certificates
caCert, err := certphase.CreatePKIAssets(i.cfg, kubeadmapi.GlobalEnvParams.HostPKIPath)
err := certphase.CreatePKIAssets(i.cfg, kubeadmapi.GlobalEnvParams.HostPKIPath)
if err != nil {
return err
}

// Exception:
if i.cfg.Discovery.Token != nil {
// Validate token
if valid, err := kubeadmutil.ValidateToken(i.cfg.Discovery.Token); valid == false {
return err
}

// Make sure there is at least one address
if len(i.cfg.Discovery.Token.Addresses) == 0 {
ip, err := netutil.ChooseHostInterface()
if err != nil {
return err
}
i.cfg.Discovery.Token.Addresses = []string{ip.String() + ":" + strconv.Itoa(kubeadmapiext.DefaultDiscoveryBindPort)}
}

if err := kubemaster.CreateTokenAuthFile(kubeadmutil.BearerToken(i.cfg.Discovery.Token)); err != nil {
return err
}
}

// PHASE 2: Generate kubeconfig files for the admin and the kubelet

// TODO this is not great, but there is only one address we can use here
@@ -238,6 +201,14 @@ func (i *Init) Run(out io.Writer) error {
return err
}

// TODO: It's not great to have an exception for token here, but necessary because the apiserver doesn't handle this properly in the API yet
// but relies on files on disk for now, which is daunting.
if i.cfg.Discovery.Token != nil {
if err := kubemaster.CreateTokenAuthFile(kubeadmutil.BearerToken(i.cfg.Discovery.Token)); err != nil {
return err
}
}

// Phase 3: Bootstrap the control plane
if err := kubemaster.WriteStaticPodManifests(i.cfg); err != nil {
return err
@@ -248,13 +219,31 @@ func (i *Init) Run(out io.Writer) error {
return err
}

if i.cfg.AuthorizationMode == "RBAC" {
err = apiconfig.CreateBootstrapRBACClusterRole(client)
if err != nil {
return err
}

err = apiconfig.CreateKubeDNSRBACClusterRole(client)
if err != nil {
return err
}

// TODO: remove this when https://github.com/kubernetes/kubeadm/issues/114 is fixed
err = apiconfig.CreateKubeProxyClusterRoleBinding(client)
if err != nil {
return err
}
}

if err := kubemaster.UpdateMasterRoleLabelsAndTaints(client, false); err != nil {
return err
}

if i.cfg.Discovery.Token != nil {
fmt.Printf("[token-discovery] Using token: %s\n", kubeadmutil.BearerToken(i.cfg.Discovery.Token))
if err := kubemaster.CreateDiscoveryDeploymentAndSecret(i.cfg, client, caCert); err != nil {
if err := kubemaster.CreateDiscoveryDeploymentAndSecret(i.cfg, client); err != nil {
return err
}
if err := kubeadmutil.UpdateOrCreateToken(client, i.cfg.Discovery.Token, kubeadmutil.DefaultTokenDuration); err != nil {
@@ -262,6 +251,16 @@ func (i *Init) Run(out io.Writer) error {
}
}

// Is deployment type self-hosted?
if i.selfHosted {
// Temporary control plane is up, now we create our self hosted control
// plane components and remove the static manifests:
fmt.Println("[init] Creating self-hosted control plane...")
if err := kubemaster.CreateSelfHostedControlPlane(i.cfg, client); err != nil {
return err
}
}

if err := kubemaster.CreateEssentialAddons(i.cfg, client); err != nil {
return err
}

16  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/join.go  generated vendored
@@ -20,15 +20,17 @@ import (
"fmt"
"io"
"io/ioutil"
"path"
"path/filepath"

"github.com/renstrom/dedent"
"github.com/spf13/cobra"

"k8s.io/apimachinery/pkg/runtime"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
"k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/validation"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/discovery"
kubenode "k8s.io/kubernetes/cmd/kubeadm/app/node"
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
@@ -136,10 +138,20 @@ func (j *Join) Run(out io.Writer) error {
if err := kubenode.PerformTLSBootstrap(cfg); err != nil {
return err
}
if err := kubeconfigphase.WriteKubeconfigToDisk(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfigphase.KubeletKubeConfigFileName), cfg); err != nil {

kubeconfigFile := filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfigphase.KubeletKubeConfigFileName)
if err := kubeconfigphase.WriteKubeconfigToDisk(kubeconfigFile, cfg); err != nil {
return err
}

// Write the ca certificate to disk so kubelet can use it for authentication
cluster := cfg.Contexts[cfg.CurrentContext].Cluster
caCertFile := filepath.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, kubeadmconstants.CACertName)
err = certutil.WriteCert(caCertFile, cfg.Clusters[cluster].CertificateAuthorityData)
if err != nil {
return fmt.Errorf("couldn't save the CA certificate to disk: %v", err)
}

fmt.Fprintf(out, joinDoneMsgf)
return nil
}

25  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset.go  generated vendored
@@ -21,12 +21,14 @@ import (
"io"
"os"
"os/exec"
"path"
"path/filepath"
"strings"

"github.com/spf13/cobra"

kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/util/initsystem"
@@ -108,7 +110,7 @@ func (r *Reset) Run(out io.Writer) error {
fmt.Printf("[reset] Failed to unmount mounted directories in /var/lib/kubelet: %s\n", string(umountOutputBytes))
}

dockerCheck := preflight.ServiceCheck{Service: "docker"}
dockerCheck := preflight.ServiceCheck{Service: "docker", CheckIfActive: true}
if warnings, errors := dockerCheck.Check(); len(warnings) == 0 && len(errors) == 0 {
fmt.Println("[reset] Removing kubernetes-managed containers")
if err := exec.Command("sh", "-c", "docker ps | grep 'k8s_' | awk '{print $1}' | xargs -r docker rm --force --volumes").Run(); err != nil {
@@ -122,7 +124,7 @@ func (r *Reset) Run(out io.Writer) error {

// Only clear etcd data when the etcd manifest is found. In case it is not found, we must assume that the user
// provided external etcd endpoints. In that case, it is his own responsibility to reset etcd
etcdManifestPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests/etcd.json")
etcdManifestPath := filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests/etcd.json")
if _, err := os.Stat(etcdManifestPath); err == nil {
dirsToClean = append(dirsToClean, "/var/lib/etcd")
} else {
@@ -150,7 +152,7 @@ func drainAndRemoveNode(removeNode bool) error {
hostname = strings.ToLower(hostname)

// TODO: Use the "native" k8s client for this once we're confident the versioned is working
kubeConfigPath := path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")
kubeConfigPath := filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.KubeletKubeConfigFileName)

getNodesCmd := fmt.Sprintf("kubectl --kubeconfig %s get nodes | grep %s", kubeConfigPath, hostname)
output, err := exec.Command("sh", "-c", getNodesCmd).Output()
@@ -179,14 +181,14 @@ func drainAndRemoveNode(removeNode bool) error {
}

// cleanDir removes everything in a directory, but not the directory itself
func cleanDir(filepath string) error {
func cleanDir(filePath string) error {
// If the directory doesn't even exist there's nothing to do, and we do
// not consider this an error
if _, err := os.Stat(filepath); os.IsNotExist(err) {
if _, err := os.Stat(filePath); os.IsNotExist(err) {
return nil
}

d, err := os.Open(filepath)
d, err := os.Open(filePath)
if err != nil {
return err
}
@@ -196,7 +198,7 @@ func cleanDir(filepath string) error {
return err
}
for _, name := range names {
err = os.RemoveAll(path.Join(filepath, name))
err = os.RemoveAll(filepath.Join(filePath, name))
if err != nil {
return err
}
@@ -207,7 +209,7 @@ func cleanDir(filepath string) error {
// resetConfigDir is used to cleanup the files kubeadm writes in /etc/kubernetes/.
func resetConfigDir(configPathDir, pkiPathDir string) {
dirsToClean := []string{
path.Join(configPathDir, "manifests"),
filepath.Join(configPathDir, "manifests"),
pkiPathDir,
}
fmt.Printf("[reset] Deleting contents of config directories: %v\n", dirsToClean)
@@ -219,8 +221,9 @@ func resetConfigDir(configPathDir, pkiPathDir string) {
}

filesToClean := []string{
path.Join(configPathDir, "admin.conf"),
path.Join(configPathDir, "kubelet.conf"),
filepath.Join(configPathDir, kubeconfig.AdminKubeConfigFileName),
filepath.Join(configPathDir, kubeconfig.KubeletKubeConfigFileName),
filepath.Join(configPathDir, kubeadmconstants.CACertName),
}
fmt.Printf("[reset] Deleting files: %v\n", filesToClean)
for _, path := range filesToClean {
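
The reset.go hunks above consistently switch from path.Join to filepath.Join for on-disk locations. A small standard-library-only illustration of the difference; the joined segments here are arbitrary examples, not paths taken from the commit:

	package main

	import (
		"fmt"
		"path"
		"path/filepath"
	)

	func main() {
		// path.Join always uses forward slashes and is intended for slash-separated
		// (URL-style) paths, regardless of the operating system.
		fmt.Println(path.Join("etc", "kubernetes", "manifests"))

		// filepath.Join uses the operating system's path separator (backslashes on
		// Windows), which is the right choice when touching real files on disk
		// such as kubelet.conf, so the kubeadm file handling moves to it.
		fmt.Println(filepath.Join("etc", "kubernetes", "manifests"))
	}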

19  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/reset_test.go  generated vendored
@@ -22,6 +22,7 @@ import (
"path/filepath"
"testing"

"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
"k8s.io/kubernetes/cmd/kubeadm/app/preflight"
)

@@ -63,8 +64,8 @@ func TestConfigDirCleaner(t *testing.T) {
"manifests/etcd.json",
"manifests/kube-apiserver.json",
"pki/ca.pem",
"admin.conf",
"kubelet.conf",
kubeconfig.AdminKubeConfigFileName,
kubeconfig.KubeletKubeConfigFileName,
},
verifyExists: []string{
"manifests",
@@ -77,7 +78,7 @@ func TestConfigDirCleaner(t *testing.T) {
},
setupFiles: []string{
"pki/ca.pem",
"kubelet.conf",
kubeconfig.KubeletKubeConfigFileName,
},
verifyExists: []string{
"pki",
@@ -95,8 +96,8 @@ func TestConfigDirCleaner(t *testing.T) {
"manifests/etcd.json",
"manifests/kube-apiserver.json",
"pki/ca.pem",
"admin.conf",
"kubelet.conf",
kubeconfig.AdminKubeConfigFileName,
kubeconfig.KubeletKubeConfigFileName,
"cloud-config",
},
verifyExists: []string{
@@ -115,8 +116,8 @@ func TestConfigDirCleaner(t *testing.T) {
"manifests/etcd.json",
"manifests/kube-apiserver.json",
"pki/ca.pem",
"admin.conf",
"kubelet.conf",
kubeconfig.AdminKubeConfigFileName,
kubeconfig.KubeletKubeConfigFileName,
".cloud-config",
".mydir/.myfile",
},
@@ -166,8 +167,8 @@ func TestConfigDirCleaner(t *testing.T) {

// Verify the files we cleanup implicitly in every test:
assertExists(t, tmpDir)
assertNotExists(t, filepath.Join(tmpDir, "admin.conf"))
assertNotExists(t, filepath.Join(tmpDir, "kubelet.conf"))
assertNotExists(t, filepath.Join(tmpDir, kubeconfig.AdminKubeConfigFileName))
assertNotExists(t, filepath.Join(tmpDir, kubeconfig.KubeletKubeConfigFileName))
assertDirEmpty(t, filepath.Join(tmpDir, "manifests"))
assertDirEmpty(t, filepath.Join(tmpDir, "pki"))

17  vendor/k8s.io/kubernetes/cmd/kubeadm/app/cmd/token.go  generated vendored
@@ -27,12 +27,13 @@ import (
"github.com/renstrom/dedent"
"github.com/spf13/cobra"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubemaster "k8s.io/kubernetes/cmd/kubeadm/app/master"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/kubectl"
)

@@ -123,7 +124,7 @@ func NewCmdTokenGenerate(out io.Writer) *cobra.Command {

// RunCreateToken generates a new bootstrap token and stores it as a secret on the server.
func RunCreateToken(out io.Writer, cmd *cobra.Command, tokenDuration time.Duration, token string) error {
client, err := kubemaster.CreateClientFromFile(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "admin.conf"))
client, err := kubemaster.CreateClientFromFile(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.AdminKubeConfigFileName))
if err != nil {
return err
}
@@ -156,7 +157,7 @@ func RunGenerateToken(out io.Writer) error {

// RunListTokens lists details on all existing bootstrap tokens on the server.
func RunListTokens(out io.Writer, errW io.Writer, cmd *cobra.Command) error {
client, err := kubemaster.CreateClientFromFile(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "admin.conf"))
client, err := kubemaster.CreateClientFromFile(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.AdminKubeConfigFileName))
if err != nil {
return err
}
@@ -166,11 +167,11 @@ func RunListTokens(out io.Writer, errW io.Writer, cmd *cobra.Command) error {
api.SecretTypeField: string(api.SecretTypeBootstrapToken),
},
)
listOptions := v1.ListOptions{
listOptions := metav1.ListOptions{
FieldSelector: tokenSelector.String(),
}

results, err := client.Secrets(api.NamespaceSystem).List(listOptions)
results, err := client.Secrets(metav1.NamespaceSystem).List(listOptions)
if err != nil {
return fmt.Errorf("failed to list bootstrap tokens [%v]", err)
}
@@ -215,13 +216,13 @@ func RunDeleteToken(out io.Writer, cmd *cobra.Command, tokenId string) error {
return err
}

client, err := kubemaster.CreateClientFromFile(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "admin.conf"))
client, err := kubemaster.CreateClientFromFile(path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.AdminKubeConfigFileName))
if err != nil {
return err
}

tokenSecretName := fmt.Sprintf("%s%s", kubeadmutil.BootstrapTokenSecretPrefix, tokenId)
if err := client.Secrets(api.NamespaceSystem).Delete(tokenSecretName, nil); err != nil {
if err := client.Secrets(metav1.NamespaceSystem).Delete(tokenSecretName, nil); err != nil {
return fmt.Errorf("failed to delete bootstrap token [%v]", err)
}
fmt.Fprintf(out, "[token] bootstrap token deleted: %s\n", tokenId)

27  vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/BUILD  generated vendored  Normal file
@@ -0,0 +1,27 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"])

load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)

go_library(
name = "go_default_library",
srcs = ["constants.go"],
tags = ["automanaged"],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

31  vendor/k8s.io/kubernetes/cmd/kubeadm/app/constants/constants.go  generated vendored  Normal file
@@ -0,0 +1,31 @@
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package constants

const (
CACertAndKeyBaseName = "ca"
CACertName = "ca.crt"
CAKeyName = "ca.key"

APIServerCertAndKeyBaseName = "apiserver"
APIServerCertName = "apiserver.crt"
APIServerKeyName = "apiserver.key"

APIServerKubeletClientCertAndKeyBaseName = "apiserver-kubelet-client"
APIServerKubeletClientCertName = "apiserver-kubelet-client.crt"
APIServerKubeletClientKeyName = "apiserver-kubelet-client.key"
)

4  vendor/k8s.io/kubernetes/cmd/kubeadm/app/discovery/BUILD  generated vendored
@@ -22,9 +22,9 @@ go_library(
"//cmd/kubeadm/app/discovery/token:go_default_library",
"//cmd/kubeadm/app/node:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//vendor:github.com/spf13/pflag",
"//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
],
)

4  vendor/k8s.io/kubernetes/cmd/kubeadm/app/discovery/discovery.go  generated vendored
@@ -21,11 +21,11 @@ import (
"io/ioutil"
"net/http"

"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubenode "k8s.io/kubernetes/cmd/kubeadm/app/node"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
)

// For identifies and executes the desired discovery mechanism.

10  vendor/k8s.io/kubernetes/cmd/kubeadm/app/images/images.go  generated vendored
@@ -39,10 +39,8 @@ const (
gcrPrefix = "gcr.io/google_containers"
etcdVersion = "3.0.14-kubeadm"

kubeDNSVersion = "1.10.1"
dnsmasqVersion = "1.10.1"
kubeDNSSidecarVersion = "1.10.1"
pauseVersion = "3.0"
kubeDNSVersion = "1.11.0"
pauseVersion = "3.0"
)

func GetCoreImage(image string, cfg *kubeadmapi.MasterConfiguration, overrideImage string) string {
@@ -63,8 +61,8 @@ func GetAddonImage(image string) string {
repoPrefix := kubeadmapi.GlobalEnvParams.RepositoryPrefix
return map[string]string{
KubeDNSImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, KubeDNSImage, runtime.GOARCH, kubeDNSVersion),
KubeDNSmasqImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, KubeDNSmasqImage, runtime.GOARCH, dnsmasqVersion),
KubeDNSSidecarImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, KubeDNSSidecarImage, runtime.GOARCH, kubeDNSSidecarVersion),
KubeDNSmasqImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, KubeDNSmasqImage, runtime.GOARCH, kubeDNSVersion),
KubeDNSSidecarImage: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, KubeDNSSidecarImage, runtime.GOARCH, kubeDNSVersion),
Pause: fmt.Sprintf("%s/%s-%s:%s", repoPrefix, Pause, runtime.GOARCH, pauseVersion),
}[image]
}
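
With the images.go change above, all three kube-dns containers are tagged from the single kubeDNSVersion constant. A rough sketch of how such an image reference is assembled after the change; the base image names below are placeholders for illustration, not the real values of KubeDNSImage and the related constants:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		// Values taken from the hunk above; repoPrefix mirrors the default gcrPrefix.
		repoPrefix := "gcr.io/google_containers"
		kubeDNSVersion := "1.11.0"

		// Placeholder base names: every kube-dns related addon now shares one version.
		for _, image := range []string{"dns-image", "dnsmasq-image", "dns-sidecar-image"} {
			fmt.Printf("%s/%s-%s:%s\n", repoPrefix, image, runtime.GOARCH, kubeDNSVersion)
		}
	}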

4  vendor/k8s.io/kubernetes/cmd/kubeadm/app/images/images_test.go  generated vendored
@@ -88,11 +88,11 @@ func TestGetAddonImage(t *testing.T) {
},
{
KubeDNSmasqImage,
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, KubeDNSmasqImage, runtime.GOARCH, dnsmasqVersion),
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, KubeDNSmasqImage, runtime.GOARCH, kubeDNSVersion),
},
{
KubeDNSSidecarImage,
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, KubeDNSSidecarImage, runtime.GOARCH, kubeDNSSidecarVersion),
fmt.Sprintf("%s/%s-%s:%s", gcrPrefix, KubeDNSSidecarImage, runtime.GOARCH, kubeDNSVersion),
},
{
Pause,

10  vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/BUILD  generated vendored
@@ -15,30 +15,32 @@ go_library(
"apiclient.go",
"discovery.go",
"manifests.go",
"selfhosted.go",
"tokens.go",
],
tags = ["automanaged"],
deps = [
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
"//cmd/kubeadm/app/apis/kubeadm/v1alpha1:go_default_library",
"//cmd/kubeadm/app/constants:go_default_library",
"//cmd/kubeadm/app/images:go_default_library",
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
"//cmd/kubeadm/app/util:go_default_library",
"//pkg/api:go_default_library",
"//pkg/api/resource:go_default_library",
"//pkg/api/v1:go_default_library",
"//pkg/apis/extensions/v1beta1:go_default_library",
"//pkg/client/clientset_generated/clientset:go_default_library",
"//pkg/client/unversioned/clientcmd:go_default_library",
"//pkg/client/unversioned/clientcmd/api:go_default_library",
"//pkg/kubectl/cmd/util:go_default_library",
"//pkg/registry/core/service/ipallocator:go_default_library",
"//pkg/util/cert:go_default_library",
"//pkg/util/intstr:go_default_library",
"//pkg/util/uuid:go_default_library",
"//vendor:github.com/blang/semver",
"//vendor:k8s.io/apimachinery/pkg/api/errors",
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
"//vendor:k8s.io/apimachinery/pkg/util/wait",
"//vendor:k8s.io/client-go/tools/clientcmd",
"//vendor:k8s.io/client-go/tools/clientcmd/api",
"//vendor:k8s.io/client-go/util/cert",
],
)

26  vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/addons.go  generated vendored
@@ -21,9 +21,10 @@ import (
"net"
"path"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
@@ -31,6 +32,8 @@ import (
"k8s.io/kubernetes/pkg/util/intstr"
)

const KubeDNS = "kube-dns"

func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
privilegedTrue := true
return v1.PodSpec{
@@ -68,7 +71,7 @@ func createKubeProxyPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
{
Name: "kubeconfig",
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
HostPath: &v1.HostPathVolumeSource{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.KubeletKubeConfigFileName)},
},
},
{
@@ -86,6 +89,7 @@ func createKubeDNSPodSpec(cfg *kubeadmapi.MasterConfiguration) v1.PodSpec {
dnsmasqPort := int32(53)

return v1.PodSpec{
ServiceAccountName: KubeDNS,
Containers: []v1.Container{
// DNS server
{
@@ -250,7 +254,7 @@ func createKubeDNSServiceSpec(cfg *kubeadmapi.MasterConfiguration) (*v1.ServiceS
}

return &v1.ServiceSpec{
Selector: map[string]string{"name": "kube-dns"},
Selector: map[string]string{"name": KubeDNS},
Ports: []v1.ServicePort{
{Name: "dns", Port: 53, Protocol: v1.ProtocolUDP},
{Name: "dns-tcp", Port: 53, Protocol: v1.ProtocolTCP},
@@ -264,17 +268,21 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
SetMasterTaintTolerations(&kubeProxyDaemonSet.Spec.Template.ObjectMeta)
SetNodeAffinity(&kubeProxyDaemonSet.Spec.Template.ObjectMeta, NativeArchitectureNodeAffinity())

if _, err := client.Extensions().DaemonSets(api.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil {
if _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(kubeProxyDaemonSet); err != nil {
return fmt.Errorf("failed creating essential kube-proxy addon [%v]", err)
}

fmt.Println("[addons] Created essential addon: kube-proxy")

kubeDNSDeployment := NewDeployment("kube-dns", 1, createKubeDNSPodSpec(cfg))
kubeDNSDeployment := NewDeployment(KubeDNS, 1, createKubeDNSPodSpec(cfg))
SetMasterTaintTolerations(&kubeDNSDeployment.Spec.Template.ObjectMeta)
SetNodeAffinity(&kubeDNSDeployment.Spec.Template.ObjectMeta, NativeArchitectureNodeAffinity())

if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
kubeDNSServiceAccount := &v1.ServiceAccount{}
kubeDNSServiceAccount.ObjectMeta.Name = KubeDNS
if _, err := client.ServiceAccounts(metav1.NamespaceSystem).Create(kubeDNSServiceAccount); err != nil {
return fmt.Errorf("failed creating kube-dns service account [%v]", err)
}
if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kubeDNSDeployment); err != nil {
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
}

@@ -283,9 +291,9 @@ func CreateEssentialAddons(cfg *kubeadmapi.MasterConfiguration, client *clientse
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
}

kubeDNSService := NewService("kube-dns", *kubeDNSServiceSpec)
kubeDNSService := NewService(KubeDNS, *kubeDNSServiceSpec)
kubeDNSService.ObjectMeta.Labels["kubernetes.io/name"] = "KubeDNS"
if _, err := client.Services(api.NamespaceSystem).Create(kubeDNSService); err != nil {
if _, err := client.Services(metav1.NamespaceSystem).Create(kubeDNSService); err != nil {
return fmt.Errorf("failed creating essential kube-dns addon [%v]", err)
}

88  vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/apiclient.go  generated vendored
@@ -25,17 +25,17 @@ import (
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
"k8s.io/kubernetes/cmd/kubeadm/app/images"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
)

const apiCallRetryInterval = 500 * time.Millisecond

// TODO: This method shouldn't exist as a standalone function but be integrated into CreateClientFromFile
func createAPIClient(adminKubeconfig *clientcmdapi.Config) (*clientset.Clientset, error) {
adminClientConfig, err := clientcmd.NewDefaultClientConfig(
*adminKubeconfig,
@@ -65,36 +65,14 @@ func CreateClientAndWaitForAPI(file string) (*clientset.Clientset, error) {
if err != nil {
return nil, err
}

fmt.Println("[apiclient] Created API client, waiting for the control plane to become ready")

start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
cs, err := client.ComponentStatuses().List(v1.ListOptions{})
if err != nil {
return false, nil
}
// TODO(phase2) must revisit this when we implement HA
if len(cs.Items) < 3 {
fmt.Println("[apiclient] Not all control plane components are ready yet")
return false, nil
}
for _, item := range cs.Items {
for _, condition := range item.Conditions {
if condition.Type != v1.ComponentHealthy {
fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
return false, nil
}
}
}

fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
return true, nil
})
WaitForAPI(client)

fmt.Println("[apiclient] Waiting for at least one node to register and become ready")
start = time.Now()
start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
nodeList, err := client.Nodes().List(v1.ListOptions{})
nodeList, err := client.Nodes().List(metav1.ListOptions{})
if err != nil {
fmt.Println("[apiclient] Temporarily unable to list nodes (will retry)")
return false, nil
@@ -124,14 +102,44 @@ func standardLabels(n string) map[string]string {
}
}

func WaitForAPI(client *clientset.Clientset) {
start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
// TODO: use /healthz API instead of this
cs, err := client.ComponentStatuses().List(metav1.ListOptions{})
if err != nil {
if apierrs.IsForbidden(err) {
fmt.Println("[apiclient] Waiting for API server authorization")
}
return false, nil
}

// TODO(phase2) must revisit this when we implement HA
if len(cs.Items) < 3 {
return false, nil
}
for _, item := range cs.Items {
for _, condition := range item.Conditions {
if condition.Type != v1.ComponentHealthy {
fmt.Printf("[apiclient] Control plane component %q is still unhealthy: %#v\n", item.ObjectMeta.Name, item.Conditions)
return false, nil
}
}
}

fmt.Printf("[apiclient] All control plane components are healthy after %f seconds\n", time.Since(start).Seconds())
return true, nil
})
}

func NewDaemonSet(daemonName string, podSpec v1.PodSpec) *extensions.DaemonSet {
l := standardLabels(daemonName)
return &extensions.DaemonSet{
ObjectMeta: v1.ObjectMeta{Name: daemonName},
ObjectMeta: metav1.ObjectMeta{Name: daemonName},
Spec: extensions.DaemonSetSpec{
Selector: &metav1.LabelSelector{MatchLabels: l},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Labels: l},
ObjectMeta: metav1.ObjectMeta{Labels: l},
Spec: podSpec,
},
},
@@ -141,7 +149,7 @@ func NewDaemonSet(daemonName string, podSpec v1.PodSpec) *extensions.DaemonSet {
func NewService(serviceName string, spec v1.ServiceSpec) *v1.Service {
l := standardLabels(serviceName)
return &v1.Service{
ObjectMeta: v1.ObjectMeta{
ObjectMeta: metav1.ObjectMeta{
Name: serviceName,
Labels: l,
},
@@ -152,12 +160,12 @@ func NewService(serviceName string, spec v1.ServiceSpec) *v1.Service {
func NewDeployment(deploymentName string, replicas int32, podSpec v1.PodSpec) *extensions.Deployment {
l := standardLabels(deploymentName)
return &extensions.Deployment{
ObjectMeta: v1.ObjectMeta{Name: deploymentName},
ObjectMeta: metav1.ObjectMeta{Name: deploymentName},
Spec: extensions.DeploymentSpec{
Replicas: &replicas,
Selector: &metav1.LabelSelector{MatchLabels: l},
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{Labels: l},
ObjectMeta: metav1.ObjectMeta{Labels: l},
Spec: podSpec,
},
},
@@ -167,7 +175,7 @@ func NewDeployment(deploymentName string, replicas int32, podSpec v1.PodSpec) *e
// It's safe to do this for alpha, as we don't have HA and there is no way we can get
// more then one node here (TODO(phase1+) use os.Hostname)
func findMyself(client *clientset.Clientset) (*v1.Node, error) {
nodeList, err := client.Nodes().List(v1.ListOptions{})
nodeList, err := client.Nodes().List(metav1.ListOptions{})
if err != nil {
return nil, fmt.Errorf("unable to list nodes [%v]", err)
}
@@ -213,7 +221,7 @@ func UpdateMasterRoleLabelsAndTaints(client *clientset.Clientset, schedulable bo
return nil
}

func SetMasterTaintTolerations(meta *v1.ObjectMeta) {
func SetMasterTaintTolerations(meta *metav1.ObjectMeta) {
tolerationsAnnotation, _ := json.Marshal([]v1.Toleration{{Key: "dedicated", Value: "master", Effect: "NoSchedule"}})
if meta.Annotations == nil {
meta.Annotations = map[string]string{}
@@ -222,7 +230,7 @@ func SetMasterTaintTolerations(meta *v1.ObjectMeta) {
}

// SetNodeAffinity is a basic helper to set meta.Annotations[v1.AffinityAnnotationKey] for one or more v1.NodeSelectorRequirement(s)
func SetNodeAffinity(meta *v1.ObjectMeta, expr ...v1.NodeSelectorRequirement) {
func SetNodeAffinity(meta *metav1.ObjectMeta, expr ...v1.NodeSelectorRequirement) {
nodeAffinity := &v1.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
NodeSelectorTerms: []v1.NodeSelectorTerm{{MatchExpressions: expr}},
@@ -265,7 +273,7 @@ func createDummyDeployment(client *clientset.Clientset) {

wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
// TODO: we should check the error, as some cases may be fatal
if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(dummyDeployment); err != nil {
if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(dummyDeployment); err != nil {
fmt.Printf("[apiclient] Failed to create test deployment [%v] (will retry)\n", err)
return false, nil
}
@@ -273,7 +281,7 @@ func createDummyDeployment(client *clientset.Clientset) {
})

wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
d, err := client.Extensions().Deployments(api.NamespaceSystem).Get("dummy", metav1.GetOptions{})
d, err := client.Extensions().Deployments(metav1.NamespaceSystem).Get("dummy", metav1.GetOptions{})
if err != nil {
fmt.Printf("[apiclient] Failed to get test deployment [%v] (will retry)\n", err)
return false, nil
@@ -287,7 +295,7 @@ func createDummyDeployment(client *clientset.Clientset) {
fmt.Println("[apiclient] Test deployment succeeded")

// TODO: In the future, make sure the ReplicaSet and Pod are garbage collected
if err := client.Extensions().Deployments(api.NamespaceSystem).Delete("dummy", &v1.DeleteOptions{}); err != nil {
if err := client.Extensions().Deployments(metav1.NamespaceSystem).Delete("dummy", &metav1.DeleteOptions{}); err != nil {
fmt.Printf("[apiclient] Failed to delete test deployment [%v] (will ignore)\n", err)
}
}
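
The refactored apiclient.go above extracts the control-plane health loop into WaitForAPI, built on wait.PollInfinite from the vendored k8s.io/apimachinery wait package. A compact sketch of that polling pattern, assuming that package is available on the module path; the isHealthy check is a stand-in for listing ComponentStatuses and inspecting their conditions:

	package main

	import (
		"fmt"
		"time"

		"k8s.io/apimachinery/pkg/util/wait"
	)

	func main() {
		attempts := 0
		// Poll every 500ms until the condition reports done, mirroring apiCallRetryInterval.
		wait.PollInfinite(500*time.Millisecond, func() (bool, error) {
			attempts++
			// Stand-in for the real health check against the API server.
			isHealthy := attempts >= 3
			if !isHealthy {
				fmt.Println("[sketch] control plane not ready yet, retrying")
				return false, nil
			}
			return true, nil
		})
		fmt.Println("[sketch] control plane reported healthy after", attempts, "attempts")
	}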

25  vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/discovery.go  generated vendored
@@ -20,18 +20,19 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"path"
"time"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
certutil "k8s.io/client-go/util/cert"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
kubeadmapiext "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1alpha1"
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
certutil "k8s.io/kubernetes/pkg/util/cert"
)

type kubeDiscovery struct {
@@ -109,7 +110,7 @@ func newKubeDiscovery(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certific
kd := kubeDiscovery{
Deployment: NewDeployment(kubeDiscoveryName, 1, newKubeDiscoveryPodSpec(cfg)),
Secret: &v1.Secret{
ObjectMeta: v1.ObjectMeta{Name: kubeDiscoverySecretName},
ObjectMeta: metav1.ObjectMeta{Name: kubeDiscoverySecretName},
Type: v1.SecretTypeOpaque,
Data: encodeKubeDiscoverySecretData(cfg.Discovery.Token, cfg.API, caCert),
},
@@ -121,13 +122,23 @@ func newKubeDiscovery(cfg *kubeadmapi.MasterConfiguration, caCert *x509.Certific
return kd
}

func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, caCert *x509.Certificate) error {
func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
caCertificatePath := path.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, kubeadmconstants.CACertName)
caCerts, err := certutil.CertsFromFile(caCertificatePath)
if err != nil {
return fmt.Errorf("couldn't load the CA certificate file %s: %v", caCertificatePath, err)
}

// We are only putting one certificate in the certificate pem file, so it's safe to just pick the first one
// TODO: Support multiple certs here in order to be able to rotate certs
caCert := caCerts[0]

kd := newKubeDiscovery(cfg, caCert)

if _, err := client.Extensions().Deployments(api.NamespaceSystem).Create(kd.Deployment); err != nil {
if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(kd.Deployment); err != nil {
return fmt.Errorf("failed to create %q deployment [%v]", kubeDiscoveryName, err)
}
if _, err := client.Secrets(api.NamespaceSystem).Create(kd.Secret); err != nil {
if _, err := client.Secrets(metav1.NamespaceSystem).Create(kd.Secret); err != nil {
return fmt.Errorf("failed to create %q secret [%v]", kubeDiscoverySecretName, err)
}
@@ -135,7 +146,7 @@ func CreateDiscoveryDeploymentAndSecret(cfg *kubeadmapi.MasterConfiguration, cli

start := time.Now()
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
d, err := client.Extensions().Deployments(api.NamespaceSystem).Get(kubeDiscoveryName, metav1.GetOptions{})
d, err := client.Extensions().Deployments(metav1.NamespaceSystem).Get(kubeDiscoveryName, metav1.GetOptions{})
if err != nil {
return false, nil
}
136
vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/manifests.go
generated
vendored
|
@ -26,6 +26,7 @@ import (
|
|||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/images"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
api "k8s.io/kubernetes/pkg/api/v1"
|
||||
|
@ -40,15 +41,17 @@ const (
|
|||
DefaultClusterName = "kubernetes"
|
||||
DefaultCloudConfigPath = "/etc/kubernetes/cloud-config"
|
||||
|
||||
etcd = "etcd"
|
||||
apiServer = "apiserver"
|
||||
controllerManager = "controller-manager"
|
||||
scheduler = "scheduler"
|
||||
proxy = "proxy"
|
||||
kubeAPIServer = "kube-apiserver"
|
||||
kubeControllerManager = "kube-controller-manager"
|
||||
kubeScheduler = "kube-scheduler"
|
||||
kubeProxy = "kube-proxy"
|
||||
etcd = "etcd"
|
||||
apiServer = "apiserver"
|
||||
controllerManager = "controller-manager"
|
||||
scheduler = "scheduler"
|
||||
proxy = "proxy"
|
||||
kubeAPIServer = "kube-apiserver"
|
||||
kubeControllerManager = "kube-controller-manager"
|
||||
kubeScheduler = "kube-scheduler"
|
||||
kubeProxy = "kube-proxy"
|
||||
authorizationPolicyFile = "abac_policy.json"
|
||||
authorizationWebhookConfigFile = "webhook_authz.conf"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -80,7 +83,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
|
|||
kubeAPIServer: componentPod(api.Container{
|
||||
Name: kubeAPIServer,
|
||||
Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
|
||||
Command: getAPIServerCommand(cfg),
|
||||
Command: getAPIServerCommand(cfg, false),
|
||||
VolumeMounts: volumeMounts,
|
||||
LivenessProbe: componentProbe(8080, "/healthz"),
|
||||
Resources: componentResources("250m"),
|
||||
|
@ -89,7 +92,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
|
|||
kubeControllerManager: componentPod(api.Container{
|
||||
Name: kubeControllerManager,
|
||||
Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
|
||||
Command: getControllerManagerCommand(cfg),
|
||||
Command: getControllerManagerCommand(cfg, false),
|
||||
VolumeMounts: volumeMounts,
|
||||
LivenessProbe: componentProbe(10252, "/healthz"),
|
||||
Resources: componentResources("200m"),
|
||||
|
@ -98,7 +101,7 @@ func WriteStaticPodManifests(cfg *kubeadmapi.MasterConfiguration) error {
|
|||
kubeScheduler: componentPod(api.Container{
|
||||
Name: kubeScheduler,
|
||||
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
|
||||
Command: getSchedulerCommand(cfg),
|
||||
Command: getSchedulerCommand(cfg, false),
|
||||
LivenessProbe: componentProbe(10251, "/healthz"),
|
||||
Resources: componentResources("100m"),
|
||||
Env: getProxyEnvVars(),
|
||||
|
@ -215,6 +218,23 @@ func pkiVolumeMount() api.VolumeMount {
|
|||
}
|
||||
}
|
||||
|
||||
func flockVolume() api.Volume {
|
||||
return api.Volume{
|
||||
Name: "var-lock",
|
||||
VolumeSource: api.VolumeSource{
|
||||
HostPath: &api.HostPathVolumeSource{Path: "/var/lock"},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func flockVolumeMount() api.VolumeMount {
|
||||
return api.VolumeMount{
|
||||
Name: "var-lock",
|
||||
MountPath: "/var/lock",
|
||||
ReadOnly: false,
|
||||
}
|
||||
}
|
||||
|
||||
func k8sVolume(cfg *kubeadmapi.MasterConfiguration) api.Volume {
|
||||
return api.Volume{
|
||||
Name: "k8s",
|
||||
|
@ -261,7 +281,7 @@ func componentPod(container api.Container, volumes ...api.Volume) api.Pod {
|
|||
APIVersion: "v1",
|
||||
Kind: "Pod",
|
||||
},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: container.Name,
|
||||
Namespace: "kube-system",
|
||||
Labels: map[string]string{"component": container.Name, "tier": "control-plane"},
|
||||
|
@ -282,23 +302,51 @@ func getComponentBaseCommand(component string) []string {
|
|||
return []string{"kube-" + component}
|
||||
}
|
||||
|
||||
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
command := append(getComponentBaseCommand(apiServer),
|
||||
func getCertFilePath(certName string) string {
|
||||
return path.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, certName)
|
||||
}
|
||||
|
||||
func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
|
||||
var command []string
|
||||
|
||||
// self-hosted apiserver needs to wait on a lock
|
||||
if selfHosted {
|
||||
command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
|
||||
}
|
||||
|
||||
command = append(getComponentBaseCommand(apiServer),
|
||||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range="+cfg.Networking.ServiceSubnet,
|
||||
"--service-account-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
|
||||
"--client-ca-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
|
||||
"--tls-cert-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver.pem",
|
||||
"--tls-private-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
|
||||
"--service-account-key-file="+getCertFilePath(kubeadmconstants.APIServerKeyName),
|
||||
"--client-ca-file="+getCertFilePath(kubeadmconstants.CACertName),
|
||||
"--tls-cert-file="+getCertFilePath(kubeadmconstants.APIServerCertName),
|
||||
"--tls-private-key-file="+getCertFilePath(kubeadmconstants.APIServerKeyName),
|
||||
"--kubelet-client-certificate="+getCertFilePath(kubeadmconstants.APIServerKubeletClientCertName),
|
||||
"--kubelet-client-key="+getCertFilePath(kubeadmconstants.APIServerKubeletClientKeyName),
|
||||
"--token-auth-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", cfg.API.Port),
|
||||
"--allow-privileged",
|
||||
"--storage-backend=etcd3",
|
||||
)
|
||||
|
||||
if cfg.AuthorizationMode != "" {
|
||||
command = append(command, "--authorization-mode="+cfg.AuthorizationMode)
|
||||
switch cfg.AuthorizationMode {
|
||||
case "ABAC":
|
||||
command = append(command, "--authorization-policy-file="+path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, authorizationPolicyFile))
|
||||
case "Webhook":
|
||||
command = append(command, "--authorization-webhook-config-file="+path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, authorizationWebhookConfigFile))
|
||||
}
|
||||
}
|
||||
|
||||
// Use first address we are given
|
||||
if len(cfg.API.AdvertiseAddresses) > 0 {
|
||||
command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
|
||||
if selfHosted {
|
||||
command = append(command, "--advertise-address=$(POD_IP)")
|
||||
} else {
|
||||
command = append(command, fmt.Sprintf("--advertise-address=%s", cfg.API.AdvertiseAddresses[0]))
|
||||
}
|
||||
}
|
||||
|
||||
if len(cfg.KubernetesVersion) != 0 {
|
||||
|
@ -347,17 +395,24 @@ func getAPIServerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
|||
return command
|
||||
}
|
||||
|
||||
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
command := append(getComponentBaseCommand(controllerManager),
|
||||
func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
|
||||
var command []string
|
||||
|
||||
// self-hosted controller-manager needs to wait on a lock
|
||||
if selfHosted {
|
||||
command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/controller-manager.lock"}
|
||||
}
|
||||
|
||||
command = append(getComponentBaseCommand(controllerManager),
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name="+DefaultClusterName,
|
||||
"--root-ca-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
|
||||
"--service-account-private-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca.pem",
|
||||
"--cluster-signing-key-file="+kubeadmapi.GlobalEnvParams.HostPKIPath+"/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
"--root-ca-file="+getCertFilePath(kubeadmconstants.CACertName),
|
||||
"--service-account-private-key-file="+getCertFilePath(kubeadmconstants.APIServerKeyName),
|
||||
"--cluster-signing-cert-file="+getCertFilePath(kubeadmconstants.CACertName),
|
||||
"--cluster-signing-key-file="+getCertFilePath(kubeadmconstants.CAKeyName),
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group="+KubeletBootstrapGroup,
|
||||
)
|
||||
|
||||
if cfg.CloudProvider != "" {
|
||||
|
@ -374,15 +429,25 @@ func getControllerManagerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
|||
if cfg.Networking.PodSubnet != "" {
|
||||
command = append(command, "--allocate-node-cidrs=true", "--cluster-cidr="+cfg.Networking.PodSubnet)
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
return append(getComponentBaseCommand(scheduler),
|
||||
func getSchedulerCommand(cfg *kubeadmapi.MasterConfiguration, selfHosted bool) []string {
|
||||
var command []string
|
||||
|
||||
// self-hosted apiserver needs to wait on a lock
|
||||
if selfHosted {
|
||||
command = []string{"/usr/bin/flock", "--exclusive", "--timeout=30", "/var/lock/api-server.lock"}
|
||||
}
|
||||
|
||||
command = append(getComponentBaseCommand(scheduler),
|
||||
"--address=127.0.0.1",
|
||||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
)
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func getProxyCommand(cfg *kubeadmapi.MasterConfiguration) []string {
|
||||
|
@ -406,3 +471,16 @@ func getProxyEnvVars() []api.EnvVar {
|
|||
}
|
||||
return envs
|
||||
}
|
||||
|
||||
func getSelfHostedAPIServerEnv() []api.EnvVar {
|
||||
podIPEnvVar := api.EnvVar{
|
||||
Name: "POD_IP",
|
||||
ValueFrom: &api.EnvVarSource{
|
||||
FieldRef: &api.ObjectFieldSelector{
|
||||
FieldPath: "status.podIP",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return append(getProxyEnvVars(), podIPEnvVar)
|
||||
}
|
||||
|
|
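A note on the locking scheme the selfHosted branches above introduce: the intent is to run each self-hosted control-plane binary under an exclusive flock on a host-path lock file (mounted via flockVolume/flockVolumeMount), so the temporary static pod and the self-hosted pod cannot both serve while ownership is handed over. The sketch below shows how such a wrapper command composes; the withFlock helper is illustrative only and is not part of this commit.

```go
package main

import (
	"fmt"
	"strings"
)

// withFlock prefixes a component command with an exclusive flock invocation,
// using the same flag values as the selfHosted branches above.
// The helper name is hypothetical; kubeadm builds the slice inline.
func withFlock(lockFile string, componentCommand []string) []string {
	wrapper := []string{"/usr/bin/flock", "--exclusive", "--timeout=30", lockFile}
	return append(wrapper, componentCommand...)
}

func main() {
	apiServer := []string{"kube-apiserver", "--insecure-bind-address=127.0.0.1", "--allow-privileged"}
	wrapped := withFlock("/var/lock/api-server.lock", apiServer)

	// Output:
	// /usr/bin/flock --exclusive --timeout=30 /var/lock/api-server.lock kube-apiserver --insecure-bind-address=127.0.0.1 --allow-privileged
	fmt.Println(strings.Join(wrapped, " "))
}
```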
82
vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/manifests_test.go
generated
vendored
|
@ -33,7 +33,7 @@ func TestWriteStaticPodManifests(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("Couldn't create tmpdir")
|
||||
}
|
||||
defer os.Remove(tmpdir)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
// set up tmp GlobalEnvParams values for testing
|
||||
oldEnv := kubeadmapi.GlobalEnvParams
|
||||
|
@ -372,13 +372,16 @@ func TestGetAPIServerCommand(t *testing.T) {
|
|||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.crt",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--kubelet-client-certificate=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.crt",
|
||||
"--kubelet-client-key=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.key",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--storage-backend=etcd3",
|
||||
"--etcd-servers=http://127.0.0.1:2379",
|
||||
},
|
||||
},
|
||||
|
@ -392,13 +395,16 @@ func TestGetAPIServerCommand(t *testing.T) {
|
|||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.crt",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--kubelet-client-certificate=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.crt",
|
||||
"--kubelet-client-key=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.key",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--storage-backend=etcd3",
|
||||
"--advertise-address=foo",
|
||||
"--etcd-servers=http://127.0.0.1:2379",
|
||||
},
|
||||
|
@ -414,13 +420,16 @@ func TestGetAPIServerCommand(t *testing.T) {
|
|||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.crt",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--kubelet-client-certificate=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.crt",
|
||||
"--kubelet-client-key=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.key",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--storage-backend=etcd3",
|
||||
"--etcd-servers=http://127.0.0.1:2379",
|
||||
"--etcd-certfile=fiz",
|
||||
"--etcd-keyfile=faz",
|
||||
|
@ -438,13 +447,16 @@ func TestGetAPIServerCommand(t *testing.T) {
|
|||
"--insecure-bind-address=127.0.0.1",
|
||||
"--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota",
|
||||
"--service-cluster-ip-range=bar",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.pem",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--service-account-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--client-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--tls-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.crt",
|
||||
"--tls-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--kubelet-client-certificate=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.crt",
|
||||
"--kubelet-client-key=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-kubelet-client.key",
|
||||
"--token-auth-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/tokens.csv",
|
||||
fmt.Sprintf("--secure-port=%d", 123),
|
||||
"--allow-privileged",
|
||||
"--storage-backend=etcd3",
|
||||
"--advertise-address=foo",
|
||||
"--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname",
|
||||
"--anonymous-auth=false",
|
||||
|
@ -454,7 +466,7 @@ func TestGetAPIServerCommand(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
actual := getAPIServerCommand(rt.cfg)
|
||||
actual := getAPIServerCommand(rt.cfg, false)
|
||||
for i := range actual {
|
||||
if actual[i] != rt.expected[i] {
|
||||
t.Errorf(
|
||||
|
@ -480,11 +492,11 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
|||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name=" + DefaultClusterName,
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.key",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=kubeadm:kubelet-bootstrap",
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -495,11 +507,11 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
|||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name=" + DefaultClusterName,
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.key",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=kubeadm:kubelet-bootstrap",
|
||||
"--cloud-provider=foo",
|
||||
},
|
||||
},
|
||||
|
@ -511,11 +523,11 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
|||
"--leader-elect",
|
||||
"--master=127.0.0.1:8080",
|
||||
"--cluster-name=" + DefaultClusterName,
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver-key.pem",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.pem",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca-key.pem",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=system:kubelet-bootstrap",
|
||||
"--root-ca-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--service-account-private-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/apiserver.key",
|
||||
"--cluster-signing-cert-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.crt",
|
||||
"--cluster-signing-key-file=" + kubeadmapi.GlobalEnvParams.HostPKIPath + "/ca.key",
|
||||
"--insecure-experimental-approve-all-kubelet-csrs-for-group=kubeadm:kubelet-bootstrap",
|
||||
"--allocate-node-cidrs=true",
|
||||
"--cluster-cidr=bar",
|
||||
},
|
||||
|
@ -523,7 +535,7 @@ func TestGetControllerManagerCommand(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
actual := getControllerManagerCommand(rt.cfg)
|
||||
actual := getControllerManagerCommand(rt.cfg, false)
|
||||
for i := range actual {
|
||||
if actual[i] != rt.expected[i] {
|
||||
t.Errorf(
|
||||
|
@ -553,7 +565,7 @@ func TestGetSchedulerCommand(t *testing.T) {
|
|||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
actual := getSchedulerCommand(rt.cfg)
|
||||
actual := getSchedulerCommand(rt.cfg, false)
|
||||
for i := range actual {
|
||||
if actual[i] != rt.expected[i] {
|
||||
t.Errorf(
|
||||
|
|
330
vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/selfhosted.go
generated
vendored
Normal file
|
@ -0,0 +1,330 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package master
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/images"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
ext "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
)
|
||||
|
||||
func CreateSelfHostedControlPlane(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset) error {
|
||||
volumes := []v1.Volume{k8sVolume(cfg)}
|
||||
volumeMounts := []v1.VolumeMount{k8sVolumeMount()}
|
||||
if isCertsVolumeMountNeeded() {
|
||||
volumes = append(volumes, certsVolume(cfg))
|
||||
volumeMounts = append(volumeMounts, certsVolumeMount())
|
||||
}
|
||||
|
||||
if isPkiVolumeMountNeeded() {
|
||||
volumes = append(volumes, pkiVolume(cfg))
|
||||
volumeMounts = append(volumeMounts, pkiVolumeMount())
|
||||
}
|
||||
|
||||
// Need lock for self-hosted
|
||||
volumes = append(volumes, flockVolume())
|
||||
volumeMounts = append(volumeMounts, flockVolumeMount())
|
||||
|
||||
if err := launchSelfHostedAPIServer(cfg, client, volumes, volumeMounts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := launchSelfHostedScheduler(cfg, client, volumes, volumeMounts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := launchSelfHostedControllerManager(cfg, client, volumes, volumeMounts); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func launchSelfHostedAPIServer(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
|
||||
start := time.Now()
|
||||
|
||||
apiServer := getAPIServerDS(cfg, volumes, volumeMounts)
|
||||
if _, err := client.Extensions().DaemonSets(metav1.NamespaceSystem).Create(&apiServer); err != nil {
|
||||
return fmt.Errorf("failed to create self-hosted %q daemon set [%v]", kubeAPIServer, err)
|
||||
}
|
||||
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
// TODO: This might be pointless, checking the pods is probably enough.
|
||||
// It does however get us a count of how many there should be which may be useful
|
||||
// with HA.
|
||||
apiDS, err := client.DaemonSets(metav1.NamespaceSystem).Get("self-hosted-"+kubeAPIServer,
|
||||
metav1.GetOptions{})
|
||||
if err != nil {
|
||||
fmt.Println("[self-hosted] error getting apiserver DaemonSet:", err)
|
||||
return false, nil
|
||||
}
|
||||
fmt.Printf("[self-hosted] %s DaemonSet current=%d, desired=%d\n",
|
||||
kubeAPIServer,
|
||||
apiDS.Status.CurrentNumberScheduled,
|
||||
apiDS.Status.DesiredNumberScheduled)
|
||||
|
||||
if apiDS.Status.CurrentNumberScheduled != apiDS.Status.DesiredNumberScheduled {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
|
||||
// Wait for self-hosted API server to take ownership
|
||||
waitForPodsWithLabel(client, "self-hosted-"+kubeAPIServer, true)
|
||||
|
||||
// Remove temporary API server
|
||||
apiServerStaticManifestPath := buildStaticManifestFilepath(kubeAPIServer)
|
||||
if err := os.RemoveAll(apiServerStaticManifestPath); err != nil {
|
||||
return fmt.Errorf("unable to delete temporary API server manifest [%v]", err)
|
||||
}
|
||||
|
||||
WaitForAPI(client)
|
||||
|
||||
fmt.Printf("[self-hosted] self-hosted kube-apiserver ready after %f seconds\n", time.Since(start).Seconds())
|
||||
return nil
|
||||
}
|
||||
|
||||
func launchSelfHostedControllerManager(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
|
||||
start := time.Now()
|
||||
|
||||
ctrlMgr := getControllerManagerDeployment(cfg, volumes, volumeMounts)
|
||||
if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&ctrlMgr); err != nil {
|
||||
return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeControllerManager, err)
|
||||
}
|
||||
|
||||
waitForPodsWithLabel(client, "self-hosted-"+kubeControllerManager, false)
|
||||
|
||||
ctrlMgrStaticManifestPath := buildStaticManifestFilepath(kubeControllerManager)
|
||||
if err := os.RemoveAll(ctrlMgrStaticManifestPath); err != nil {
|
||||
return fmt.Errorf("unable to delete temporary controller manager manifest [%v]", err)
|
||||
}
|
||||
|
||||
fmt.Printf("[self-hosted] self-hosted kube-controller-manager ready after %f seconds\n", time.Since(start).Seconds())
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func launchSelfHostedScheduler(cfg *kubeadmapi.MasterConfiguration, client *clientset.Clientset, volumes []v1.Volume, volumeMounts []v1.VolumeMount) error {
|
||||
start := time.Now()
|
||||
scheduler := getSchedulerDeployment(cfg)
|
||||
if _, err := client.Extensions().Deployments(metav1.NamespaceSystem).Create(&scheduler); err != nil {
|
||||
return fmt.Errorf("failed to create self-hosted %q deployment [%v]", kubeScheduler, err)
|
||||
}
|
||||
|
||||
waitForPodsWithLabel(client, "self-hosted-"+kubeScheduler, false)
|
||||
|
||||
schedulerStaticManifestPath := buildStaticManifestFilepath(kubeScheduler)
|
||||
if err := os.RemoveAll(schedulerStaticManifestPath); err != nil {
|
||||
return fmt.Errorf("unable to delete temporary scheduler manifest [%v]", err)
|
||||
}
|
||||
|
||||
fmt.Printf("[self-hosted] self-hosted kube-scheduler ready after %f seconds\n", time.Since(start).Seconds())
|
||||
return nil
|
||||
}
|
||||
|
||||
// waitForPodsWithLabel will lookup pods with the given label and wait until they are all
|
||||
// reporting status as running.
|
||||
func waitForPodsWithLabel(client *clientset.Clientset, appLabel string, mustBeRunning bool) {
|
||||
wait.PollInfinite(apiCallRetryInterval, func() (bool, error) {
|
||||
// TODO: Do we need a stronger label link than this?
|
||||
listOpts := metav1.ListOptions{LabelSelector: fmt.Sprintf("k8s-app=%s", appLabel)}
|
||||
apiPods, err := client.Pods(metav1.NamespaceSystem).List(listOpts)
|
||||
if err != nil {
|
||||
fmt.Printf("[self-hosted] error getting %s pods [%v]\n", appLabel, err)
|
||||
return false, nil
|
||||
}
|
||||
fmt.Printf("[self-hosted] Found %d %s pods\n", len(apiPods.Items), appLabel)
|
||||
|
||||
// TODO: HA
|
||||
if int32(len(apiPods.Items)) != 1 {
|
||||
return false, nil
|
||||
}
|
||||
for _, pod := range apiPods.Items {
|
||||
fmt.Printf("[self-hosted] Pod %s status: %s\n", pod.Name, pod.Status.Phase)
|
||||
if mustBeRunning && pod.Status.Phase != "Running" {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
|
||||
return true, nil
|
||||
})
|
||||
}
|
||||
|
||||
// Sources from bootkube templates.go
|
||||
func getAPIServerDS(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.DaemonSet {
|
||||
ds := ext.DaemonSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "DaemonSet",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "self-hosted-" + kubeAPIServer,
|
||||
Namespace: "kube-system",
|
||||
Labels: map[string]string{"k8s-app": "self-hosted-" + kubeAPIServer},
|
||||
},
|
||||
Spec: ext.DaemonSetSpec{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"k8s-app": "self-hosted-" + kubeAPIServer,
|
||||
"component": kubeAPIServer,
|
||||
"tier": "control-plane",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
v1.TolerationsAnnotationKey: getMasterToleration(),
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
|
||||
HostNetwork: true,
|
||||
Volumes: volumes,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "self-hosted-" + kubeAPIServer,
|
||||
Image: images.GetCoreImage(images.KubeAPIServerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
|
||||
Command: getAPIServerCommand(cfg, true),
|
||||
Env: getSelfHostedAPIServerEnv(),
|
||||
VolumeMounts: volumeMounts,
|
||||
LivenessProbe: componentProbe(8080, "/healthz"),
|
||||
Resources: componentResources("250m"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return ds
|
||||
}
|
||||
|
||||
func getControllerManagerDeployment(cfg *kubeadmapi.MasterConfiguration, volumes []v1.Volume, volumeMounts []v1.VolumeMount) ext.Deployment {
|
||||
d := ext.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "self-hosted-" + kubeControllerManager,
|
||||
Namespace: "kube-system",
|
||||
Labels: map[string]string{"k8s-app": "self-hosted-" + kubeControllerManager},
|
||||
},
|
||||
Spec: ext.DeploymentSpec{
|
||||
// TODO bootkube uses 2 replicas
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"k8s-app": "self-hosted-" + kubeControllerManager,
|
||||
"component": kubeControllerManager,
|
||||
"tier": "control-plane",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
v1.TolerationsAnnotationKey: getMasterToleration(),
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
|
||||
HostNetwork: true,
|
||||
Volumes: volumes,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "self-hosted-" + kubeControllerManager,
|
||||
Image: images.GetCoreImage(images.KubeControllerManagerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
|
||||
Command: getControllerManagerCommand(cfg, true),
|
||||
VolumeMounts: volumeMounts,
|
||||
LivenessProbe: componentProbe(10252, "/healthz"),
|
||||
Resources: componentResources("200m"),
|
||||
Env: getProxyEnvVars(),
|
||||
},
|
||||
},
|
||||
DNSPolicy: v1.DNSDefault,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func getSchedulerDeployment(cfg *kubeadmapi.MasterConfiguration) ext.Deployment {
|
||||
d := ext.Deployment{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
APIVersion: "extensions/v1beta1",
|
||||
Kind: "Deployment",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "self-hosted-" + kubeScheduler,
|
||||
Namespace: "kube-system",
|
||||
Labels: map[string]string{"k8s-app": "self-hosted-" + kubeScheduler},
|
||||
},
|
||||
Spec: ext.DeploymentSpec{
|
||||
// TODO bootkube uses 2 replicas
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"k8s-app": "self-hosted-" + kubeScheduler,
|
||||
"component": kubeScheduler,
|
||||
"tier": "control-plane",
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
v1.TolerationsAnnotationKey: getMasterToleration(),
|
||||
},
|
||||
},
|
||||
Spec: v1.PodSpec{
|
||||
NodeSelector: map[string]string{metav1.NodeLabelKubeadmAlphaRole: metav1.NodeLabelRoleMaster},
|
||||
HostNetwork: true,
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "self-hosted-" + kubeScheduler,
|
||||
Image: images.GetCoreImage(images.KubeSchedulerImage, cfg, kubeadmapi.GlobalEnvParams.HyperkubeImage),
|
||||
Command: getSchedulerCommand(cfg, true),
|
||||
LivenessProbe: componentProbe(10251, "/healthz"),
|
||||
Resources: componentResources("100m"),
|
||||
Env: getProxyEnvVars(),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
return d
|
||||
}
|
||||
|
||||
func buildStaticManifestFilepath(name string) string {
|
||||
return path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests", name+".json")
|
||||
}
|
||||
|
||||
func getMasterToleration() string {
|
||||
// Tolerate the master taint we add to our master nodes, as this can and should
|
||||
// run there.
|
||||
// TODO: Duplicated above
|
||||
masterToleration, _ := json.Marshal([]v1.Toleration{{
|
||||
Key: "dedicated",
|
||||
Value: "master",
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}})
|
||||
return string(masterToleration)
|
||||
}
|
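The master toleration used in the pod template annotations above is serialized JSON stored under v1.TolerationsAnnotationKey (scheduler.alpha.kubernetes.io/tolerations in this vintage of the API). A minimal sketch of the resulting value, using a local struct that mirrors the JSON shape of v1.Toleration so the example stays dependency-free:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// toleration mirrors the JSON shape of v1.Toleration for illustration only.
type toleration struct {
	Key      string `json:"key,omitempty"`
	Operator string `json:"operator,omitempty"`
	Value    string `json:"value,omitempty"`
	Effect   string `json:"effect,omitempty"`
}

func main() {
	// Same toleration that getMasterToleration marshals.
	masterToleration, _ := json.Marshal([]toleration{{
		Key:      "dedicated",
		Operator: "Equal",
		Value:    "master",
		Effect:   "NoSchedule",
	}})

	// Output:
	// [{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]
	fmt.Println(string(masterToleration))
}
```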
9
vendor/k8s.io/kubernetes/cmd/kubeadm/app/master/tokens.go
generated
vendored
|
@ -27,12 +27,19 @@ import (
|
|||
"k8s.io/kubernetes/pkg/util/uuid"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO: prefix with kubeadm prefix
|
||||
KubeletBootstrapUser = "kubeadm-node-csr"
|
||||
|
||||
KubeletBootstrapGroup = "kubeadm:kubelet-bootstrap"
|
||||
)
|
||||
|
||||
func CreateTokenAuthFile(bt string) error {
|
||||
tokenAuthFilePath := path.Join(kubeadmapi.GlobalEnvParams.HostPKIPath, "tokens.csv")
|
||||
if err := os.MkdirAll(kubeadmapi.GlobalEnvParams.HostPKIPath, 0700); err != nil {
|
||||
return fmt.Errorf("failed to create directory %q [%v]", kubeadmapi.GlobalEnvParams.HostPKIPath, err)
|
||||
}
|
||||
serialized := []byte(fmt.Sprintf("%s,kubeadm-node-csr,%s,system:kubelet-bootstrap\n", bt, uuid.NewUUID()))
|
||||
serialized := []byte(fmt.Sprintf("%s,%s,%s,%s\n", bt, KubeletBootstrapUser, uuid.NewUUID(), KubeletBootstrapGroup))
|
||||
// DumpReaderToFile creates a file with mode 0600
|
||||
if err := cmdutil.DumpReaderToFile(bytes.NewReader(serialized), tokenAuthFilePath); err != nil {
|
||||
return fmt.Errorf("failed to save token auth file (%q) [%v]", tokenAuthFilePath, err)
|
||||
|
|
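For reference, the line CreateTokenAuthFile now serializes follows the static token file format consumed by the --token-auth-file flag the API server manifest already passes: token, user name, user UID, then groups. A small sketch of the row that ends up in tokens.csv; the token and UID values below are made up:

```go
package main

import "fmt"

// Constants mirroring the ones added in tokens.go above.
const (
	KubeletBootstrapUser  = "kubeadm-node-csr"
	KubeletBootstrapGroup = "kubeadm:kubelet-bootstrap"
)

func main() {
	// Example values only; kubeadm generates the token itself and fills the
	// third column with uuid.NewUUID().
	bootstrapToken := "abcdef.0123456789abcdef"
	uid := "4ffd46db-0000-0000-0000-000000000000"

	// One row per bootstrap token in tokens.csv: token,user,uid,group
	line := fmt.Sprintf("%s,%s,%s,%s\n", bootstrapToken, KubeletBootstrapUser, uid, KubeletBootstrapGroup)
	fmt.Print(line)
}
```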
13
vendor/k8s.io/kubernetes/cmd/kubeadm/app/node/BUILD
generated
vendored
|
@ -20,16 +20,17 @@ go_library(
|
|||
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
|
||||
"//cmd/kubeadm/app/util:go_default_library",
|
||||
"//pkg/api/v1:go_default_library",
|
||||
"//pkg/apis/certificates:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/unversioned/clientcmd:go_default_library",
|
||||
"//pkg/client/unversioned/clientcmd/api:go_default_library",
|
||||
"//pkg/kubelet/util/csr:go_default_library",
|
||||
"//pkg/util/cert:go_default_library",
|
||||
"//vendor:github.com/square/go-jose",
|
||||
"//vendor:k8s.io/apimachinery/pkg/api/errors",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/types",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/wait",
|
||||
"//vendor:k8s.io/client-go/tools/clientcmd",
|
||||
"//vendor:k8s.io/client-go/tools/clientcmd/api",
|
||||
"//vendor:k8s.io/client-go/util/cert",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -44,10 +45,10 @@ go_test(
|
|||
deps = [
|
||||
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//pkg/client/restclient:go_default_library",
|
||||
"//pkg/client/typed/discovery:go_default_library",
|
||||
"//pkg/version:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
"//vendor:k8s.io/apimachinery/pkg/version",
|
||||
"//vendor:k8s.io/client-go/rest",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
18
vendor/k8s.io/kubernetes/cmd/kubeadm/app/node/bootstrap.go
generated
vendored
|
@ -22,16 +22,17 @@ import (
|
|||
"sync"
|
||||
"time"
|
||||
|
||||
apierrs "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
kubeconfigphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
|
||||
kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/apis/certificates"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
|
||||
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
|
||||
)
|
||||
|
||||
// retryTimeout between the subsequent attempts to connect
|
||||
|
@ -128,14 +129,9 @@ func checkForNodeNameDuplicates(clientSet *clientset.Clientset) error {
|
|||
if err != nil {
|
||||
return fmt.Errorf("Failed to get node hostname [%v]", err)
|
||||
}
|
||||
nodeList, err := clientSet.Nodes().List(v1.ListOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to list the nodes in the cluster: [%v]\n", err)
|
||||
}
|
||||
for _, node := range nodeList.Items {
|
||||
if hostName == node.Name {
|
||||
return fmt.Errorf("Node with name [%q] already exists.", node.Name)
|
||||
}
|
||||
_, err = clientSet.Nodes().Get(hostName, metav1.GetOptions{})
|
||||
if err != nil && !apierrs.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
4
vendor/k8s.io/kubernetes/cmd/kubeadm/app/node/bootstrap_test.go
generated
vendored
|
@ -23,11 +23,11 @@ import (
|
|||
"testing"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/version"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/client/typed/discovery"
|
||||
"k8s.io/kubernetes/pkg/version"
|
||||
)
|
||||
|
||||
func TestEstablishMasterConnection(t *testing.T) {
|
||||
|
|
6
vendor/k8s.io/kubernetes/cmd/kubeadm/app/node/csr.go
generated
vendored
|
@ -21,11 +21,11 @@ import (
|
|||
"os"
|
||||
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
|
||||
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/csr"
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
)
|
||||
|
||||
// PerformTLSBootstrap executes a node certificate signing request.
|
||||
|
|
33
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,33 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["clusterroles.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kubeadm/app/master:go_default_library",
|
||||
"//pkg/apis/rbac/v1beta1:go_default_library",
|
||||
"//pkg/client/clientset_generated/clientset:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
113
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/apiconfig/clusterroles.go
generated
vendored
Normal file
|
@ -0,0 +1,113 @@
|
|||
/*
|
||||
Copyright 2017 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package apiconfig
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/master"
|
||||
rbac "k8s.io/kubernetes/pkg/apis/rbac/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
|
||||
)
|
||||
|
||||
// CreateBootstrapRBACClusterRole grants the system:node-bootstrapper role to the group we created the bootstrap credential with
|
||||
func CreateBootstrapRBACClusterRole(clientset *clientset.Clientset) error {
|
||||
clusterRoleBinding := rbac.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "kubeadm:kubelet-bootstrap",
|
||||
},
|
||||
RoleRef: rbac.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "system:node-bootstrapper",
|
||||
},
|
||||
Subjects: []rbac.Subject{
|
||||
{Kind: "Group", Name: master.KubeletBootstrapGroup},
|
||||
},
|
||||
}
|
||||
if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("[apiconfig] Created node bootstrapper RBAC rules")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateKubeDNSRBACClusterRole creates the necessary ClusterRole for kube-dns
|
||||
func CreateKubeDNSRBACClusterRole(clientset *clientset.Clientset) error {
|
||||
clusterRole := rbac.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "kubeadm:" + master.KubeDNS},
|
||||
Rules: []rbac.PolicyRule{
|
||||
rbac.NewRule("list", "watch").Groups("").Resources("endpoints", "services").RuleOrDie(),
|
||||
// TODO: remove watch rule when https://github.com/kubernetes/kubernetes/pull/38816 gets merged
|
||||
rbac.NewRule("get", "list", "watch").Groups("").Resources("configmaps").RuleOrDie(),
|
||||
},
|
||||
}
|
||||
if _, err := clientset.Rbac().ClusterRoles().Create(&clusterRole); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
subject := rbac.Subject{
|
||||
Kind: "ServiceAccount",
|
||||
Name: master.KubeDNS,
|
||||
Namespace: metav1.NamespaceSystem,
|
||||
}
|
||||
|
||||
clusterRoleBinding := rbac.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "kubeadm:" + master.KubeDNS,
|
||||
},
|
||||
RoleRef: rbac.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: clusterRole.Name,
|
||||
},
|
||||
Subjects: []rbac.Subject{subject},
|
||||
}
|
||||
if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("[apiconfig] Created kube-dns RBAC rules")
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateKubeProxyClusterRoleBinding grants the system:node-proxier role to the nodes group,
|
||||
// since kubelet credentials are used to run the kube-proxy
|
||||
// TODO: give the kube-proxy its own credential and stop requiring this
|
||||
func CreateKubeProxyClusterRoleBinding(clientset *clientset.Clientset) error {
|
||||
clusterRoleBinding := rbac.ClusterRoleBinding{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "kubeadm:node-proxier",
|
||||
},
|
||||
RoleRef: rbac.RoleRef{
|
||||
APIGroup: "rbac.authorization.k8s.io",
|
||||
Kind: "ClusterRole",
|
||||
Name: "system:node-proxier",
|
||||
},
|
||||
Subjects: []rbac.Subject{
|
||||
{Kind: "Group", Name: "system:nodes"},
|
||||
},
|
||||
}
|
||||
if _, err := clientset.Rbac().ClusterRoleBindings().Create(&clusterRoleBinding); err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Println("[apiconfig] Created kube-proxy RBAC rules")
|
||||
|
||||
return nil
|
||||
}
|
18
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/BUILD
generated
vendored
|
@ -10,15 +10,12 @@ load(
|
|||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = [
|
||||
"certs_test.go",
|
||||
"pki_helpers_test.go",
|
||||
],
|
||||
srcs = ["certs_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
|
||||
"//pkg/util/cert:go_default_library",
|
||||
"//vendor:k8s.io/client-go/util/cert",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -27,13 +24,15 @@ go_library(
|
|||
srcs = [
|
||||
"certs.go",
|
||||
"doc.go",
|
||||
"pki_helpers.go",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
|
||||
"//cmd/kubeadm/app/constants:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
|
||||
"//pkg/registry/core/service/ipallocator:go_default_library",
|
||||
"//pkg/util/cert:go_default_library",
|
||||
"//vendor:k8s.io/apimachinery/pkg/util/sets",
|
||||
"//vendor:k8s.io/client-go/util/cert",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -46,6 +45,9 @@ filegroup(
|
|||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//cmd/kubeadm/app/phases/certs/pkiutil:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
)
|
||||
|
|
161
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs.go
generated
vendored
|
@ -17,21 +17,30 @@ limitations under the License.
|
|||
package certs
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
|
||||
setutil "k8s.io/apimachinery/pkg/util/sets"
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
)
|
||||
|
||||
// TODO: Integration test cases
|
||||
// no files exist => create all four files
|
||||
// valid ca.{crt,key} exists => create apiserver.{crt,key}
|
||||
// valid ca.{crt,key} and apiserver.{crt,key} exists => do nothing
|
||||
// invalid ca.{crt,key} exists => error
|
||||
// only one of the .crt or .key file exists => error
|
||||
|
||||
// CreatePKIAssets will create and write to disk all PKI assets necessary to establish the control plane.
|
||||
// It first generates a self-signed CA certificate, a server certificate (signed by the CA) and a key for
|
||||
// signing service account tokens. It returns CA key and certificate, which is convenient for use with
|
||||
// client config funcs.
|
||||
func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration, pkiPath string) (*x509.Certificate, error) {
|
||||
// It generates a self-signed CA certificate and a server certificate (signed by the CA)
|
||||
func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration, pkiDir string) error {
|
||||
altNames := certutil.AltNames{}
|
||||
|
||||
// First, define all domains this cert should be signed for
|
||||
|
@ -43,7 +52,7 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration, pkiPath string) (*x509
|
|||
}
|
||||
hostname, err := os.Hostname()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("couldn't get the hostname: %v", err)
|
||||
return fmt.Errorf("couldn't get the hostname: %v", err)
|
||||
}
|
||||
altNames.DNSNames = append(cfg.API.ExternalDNSNames, hostname)
|
||||
altNames.DNSNames = append(altNames.DNSNames, internalAPIServerFQDN...)
|
||||
|
@ -53,50 +62,136 @@ func CreatePKIAssets(cfg *kubeadmapi.MasterConfiguration, pkiPath string) (*x509
|
|||
if ip := net.ParseIP(a); ip != nil {
|
||||
altNames.IPs = append(altNames.IPs, ip)
|
||||
} else {
|
||||
return nil, fmt.Errorf("could not parse ip %q", a)
|
||||
return fmt.Errorf("could not parse ip %q", a)
|
||||
}
|
||||
}
|
||||
// and lastly, extract the internal IP address for the API server
|
||||
_, n, err := net.ParseCIDR(cfg.Networking.ServiceSubnet)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err)
|
||||
return fmt.Errorf("error parsing CIDR %q: %v", cfg.Networking.ServiceSubnet, err)
|
||||
}
|
||||
internalAPIServerVirtualIP, err := ipallocator.GetIndexedIP(n, 1)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to allocate IP address for the API server from the given CIDR (%q) [%v]", &cfg.Networking.ServiceSubnet, err)
|
||||
return fmt.Errorf("unable to allocate IP address for the API server from the given CIDR (%q) [%v]", &cfg.Networking.ServiceSubnet, err)
|
||||
}
|
||||
|
||||
altNames.IPs = append(altNames.IPs, internalAPIServerVirtualIP)
|
||||
|
||||
caKey, caCert, err := newCertificateAuthority()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failure while creating CA keys and certificate [%v]", err)
|
||||
var caCert *x509.Certificate
|
||||
var caKey *rsa.PrivateKey
|
||||
// If at least one of them exists, we should try to load them
|
||||
// In the case that only one exists, there will most likely be an error anyway
|
||||
if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.CACertAndKeyBaseName) {
|
||||
// Try to load ca.crt and ca.key from the PKI directory
|
||||
caCert, caKey, err = pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.CACertAndKeyBaseName)
|
||||
if err != nil || caCert == nil || caKey == nil {
|
||||
return fmt.Errorf("certificate and/or key existed but they could not be loaded properly")
|
||||
}
|
||||
|
||||
// The certificate and key could be loaded, but the certificate is not a CA
|
||||
if !caCert.IsCA {
|
||||
return fmt.Errorf("certificate and key could be loaded but the certificate is not a CA")
|
||||
}
|
||||
|
||||
fmt.Println("[certificates] Using the existing CA certificate and key.")
|
||||
} else {
|
||||
// The certificate and the key did NOT exist, let's generate them now
|
||||
caCert, caKey, err = pkiutil.NewCertificateAuthority()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failure while generating CA certificate and key [%v]", err)
|
||||
}
|
||||
|
||||
if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.CACertAndKeyBaseName, caCert, caKey); err != nil {
|
||||
return fmt.Errorf("failure while saving CA certificate and key [%v]", err)
|
||||
}
|
||||
fmt.Println("[certificates] Generated CA certificate and key.")
|
||||
}
|
||||
|
||||
if err := writeKeysAndCert(pkiPath, "ca", caKey, caCert); err != nil {
|
||||
return nil, fmt.Errorf("failure while saving CA keys and certificate [%v]", err)
|
||||
}
|
||||
fmt.Println("[certificates] Generated Certificate Authority key and certificate.")
|
||||
// If at least one of them exists, we should try to load them
|
||||
// In the case that only one exists, there will most likely be an error anyway
|
||||
if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.APIServerCertAndKeyBaseName) {
|
||||
// Try to load apiserver.crt and apiserver.key from the PKI directory
|
||||
apiCert, apiKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.APIServerCertAndKeyBaseName)
|
||||
if err != nil || apiCert == nil || apiKey == nil {
|
||||
return fmt.Errorf("certificate and/or key existed but they could not be loaded properly")
|
||||
}
|
||||
|
||||
apiKey, apiCert, err := newServerKeyAndCert(caCert, caKey, altNames)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failure while creating API server keys and certificate [%v]", err)
|
||||
fmt.Println("[certificates] Using the existing API Server certificate and key.")
|
||||
} else {
|
||||
// The certificate and the key did NOT exist, let's generate them now
|
||||
// TODO: Add a test case to verify that this cert has the x509.ExtKeyUsageServerAuth flag
|
||||
config := certutil.Config{
|
||||
CommonName: "kube-apiserver",
|
||||
AltNames: altNames,
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
|
||||
}
|
||||
apiCert, apiKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failure while creating API server key and certificate [%v]", err)
|
||||
}
|
||||
|
||||
if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.APIServerCertAndKeyBaseName, apiCert, apiKey); err != nil {
|
||||
return fmt.Errorf("failure while saving API server certificate and key [%v]", err)
|
||||
}
|
||||
fmt.Println("[certificates] Generated API server certificate and key.")
|
||||
}
|
||||
|
||||
if err := writeKeysAndCert(pkiPath, "apiserver", apiKey, apiCert); err != nil {
|
||||
return nil, fmt.Errorf("failure while saving API server keys and certificate [%v]", err)
|
||||
}
|
||||
fmt.Println("[certificates] Generated API Server key and certificate")
|
||||
// If at least one of them exists, we should try to load them
|
||||
// In the case that only one exists, there will most likely be an error anyway
|
||||
if pkiutil.CertOrKeyExist(pkiDir, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName) {
|
||||
// Try to load apiserver-kubelet-client.crt and apiserver-kubelet-client.key from the PKI directory
|
||||
apiCert, apiKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName)
|
||||
if err != nil || apiCert == nil || apiKey == nil {
|
||||
return fmt.Errorf("certificate and/or key existed but they could not be loaded properly")
|
||||
}
|
||||
|
||||
// Generate a private key for service accounts
|
||||
saKey, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failure while creating service account signing keys [%v]", err)
|
||||
fmt.Println("[certificates] Using the existing API Server kubelet client certificate and key.")
|
||||
} else {
|
||||
// The certificate and the key did NOT exist, let's generate them now
|
||||
// TODO: Add a test case to verify that this cert has the x509.ExtKeyUsageClientAuth flag
|
||||
config := certutil.Config{
|
||||
CommonName: "kube-apiserver-kubelet-client",
|
||||
Organization: []string{"system:masters"},
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
}
|
||||
apiClientCert, apiClientKey, err := pkiutil.NewCertAndKey(caCert, caKey, config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failure while creating API server kubelet client key and certificate [%v]", err)
|
||||
}
|
||||
|
||||
if err = pkiutil.WriteCertAndKey(pkiDir, kubeadmconstants.APIServerKubeletClientCertAndKeyBaseName, apiClientCert, apiClientKey); err != nil {
|
||||
return fmt.Errorf("failure while saving API server kubelet client certificate and key [%v]", err)
|
||||
}
|
||||
fmt.Println("[certificates] Generated API server kubelet client certificate and key.")
|
||||
}
|
||||
if err := writeKeysAndCert(pkiPath, "sa", saKey, nil); err != nil {
|
||||
return nil, fmt.Errorf("failure while saving service account signing keys [%v]", err)
|
||||
}
|
||||
fmt.Println("[certificates] Generated Service Account signing keys")
|
||||
fmt.Printf("[certificates] Created keys and certificates in %q\n", pkiPath)
|
||||
return caCert, nil
|
||||
|
||||
fmt.Printf("[certificates] Valid certificates and keys now exist in %q\n", pkiDir)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Verify that the cert is valid for all IPs and DNS names it should be valid for
|
||||
func checkAltNamesExist(IPs []net.IP, DNSNames []string, altNames certutil.AltNames) bool {
|
||||
dnsset := setutil.NewString(DNSNames...)
|
||||
|
||||
for _, dnsNameThatShouldExist := range altNames.DNSNames {
|
||||
if !dnsset.Has(dnsNameThatShouldExist) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, ipThatShouldExist := range altNames.IPs {
|
||||
found := false
|
||||
for _, ip := range IPs {
|
||||
if ip.Equal(ipThatShouldExist) {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
|
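The rewritten CreatePKIAssets above is idempotent: it first checks whether a CA certificate and key already exist on disk, loads and sanity-checks them if so (including the IsCA check), and only generates and persists a fresh self-signed CA otherwise. Below is a minimal, standard-library sketch of that load-or-generate pattern; the file names, key size and ten-year validity are illustrative assumptions, not kubeadm's exact values, and this is not the pkiutil implementation itself.

package casketch

import (
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"os"
	"path/filepath"
	"time"
)

// loadOrCreateCA reuses an existing CA pair on disk if one is found, and only
// generates (and saves) a new self-signed CA when nothing is there yet.
func loadOrCreateCA(dir string) (*x509.Certificate, *rsa.PrivateKey, error) {
	certPath := filepath.Join(dir, "ca.crt")
	keyPath := filepath.Join(dir, "ca.key")

	// If the certificate is already there, insist on loading a usable pair
	// instead of silently overwriting it.
	if _, err := os.Stat(certPath); err == nil {
		certPEM, err := os.ReadFile(certPath)
		if err != nil {
			return nil, nil, err
		}
		keyPEM, err := os.ReadFile(keyPath)
		if err != nil {
			return nil, nil, fmt.Errorf("ca.crt exists but ca.key could not be read: %v", err)
		}
		certBlock, _ := pem.Decode(certPEM)
		keyBlock, _ := pem.Decode(keyPEM)
		if certBlock == nil || keyBlock == nil {
			return nil, nil, fmt.Errorf("existing CA files are not valid PEM")
		}
		cert, err := x509.ParseCertificate(certBlock.Bytes)
		if err != nil {
			return nil, nil, err
		}
		key, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes)
		if err != nil {
			return nil, nil, err
		}
		if !cert.IsCA {
			return nil, nil, fmt.Errorf("certificate and key could be loaded but the certificate is not a CA")
		}
		return cert, key, nil
	}

	// Nothing on disk yet: generate a self-signed CA and persist it.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber:          big.NewInt(1),
		Subject:               pkix.Name{CommonName: "kubernetes"},
		NotBefore:             time.Now(),
		NotAfter:              time.Now().AddDate(10, 0, 0),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageDigitalSignature,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		return nil, nil, err
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		return nil, nil, err
	}
	certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: der})
	keyPEM := pem.EncodeToMemory(&pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(key)})
	if err := os.WriteFile(certPath, certPEM, 0644); err != nil {
		return nil, nil, err
	}
	if err := os.WriteFile(keyPath, keyPEM, 0600); err != nil {
		return nil, nil, err
	}
	return cert, key, nil
}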
53
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/certs_test.go
generated
vendored
|
@ -19,9 +19,11 @@ package certs
|
|||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
)
|
||||
|
||||
|
@ -65,7 +67,7 @@ func TestCreatePKIAssets(t *testing.T) {
|
|||
},
|
||||
}
|
||||
for _, rt := range tests {
|
||||
_, actual := CreatePKIAssets(rt.cfg, fmt.Sprintf("%s/etc/kubernetes/pki", tmpdir))
|
||||
actual := CreatePKIAssets(rt.cfg, fmt.Sprintf("%s/etc/kubernetes/pki", tmpdir))
|
||||
if (actual == nil) != rt.expected {
|
||||
t.Errorf(
|
||||
"failed CreatePKIAssets with an error:\n\texpected: %t\n\t actual: %t",
|
||||
|
@ -75,3 +77,52 @@ func TestCreatePKIAssets(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestCheckAltNamesExist(t *testing.T) {
|
||||
var tests = []struct {
|
||||
IPs []net.IP
|
||||
DNSNames []string
|
||||
requiredAltNames certutil.AltNames
|
||||
succeed bool
|
||||
}{
|
||||
{
|
||||
// equal
|
||||
requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "baz"}},
|
||||
IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")},
|
||||
DNSNames: []string{"foo", "bar", "baz"},
|
||||
succeed: true,
|
||||
},
|
||||
{
|
||||
// the loaded cert has more ips than required, ok
|
||||
requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "baz"}},
|
||||
IPs: []net.IP{net.ParseIP("192.168.2.5"), net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")},
|
||||
DNSNames: []string{"a", "foo", "b", "bar", "baz"},
|
||||
succeed: true,
|
||||
},
|
||||
{
|
||||
// the loaded cert doesn't have all ips
|
||||
requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.2.5"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "baz"}},
|
||||
IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")},
|
||||
DNSNames: []string{"foo", "bar", "baz"},
|
||||
succeed: false,
|
||||
},
|
||||
{
|
||||
// the loaded cert doesn't have all required DNS names
|
||||
requiredAltNames: certutil.AltNames{IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")}, DNSNames: []string{"foo", "bar", "b", "baz"}},
|
||||
IPs: []net.IP{net.ParseIP("1.1.1.1"), net.ParseIP("192.168.1.2")},
|
||||
DNSNames: []string{"foo", "bar", "baz"},
|
||||
succeed: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
succeeded := checkAltNamesExist(rt.IPs, rt.DNSNames, rt.requiredAltNames)
|
||||
if succeeded != rt.succeed {
|
||||
t.Errorf(
|
||||
"failed checkAltNamesExist:\n\texpected: %t\n\t actual: %t",
|
||||
rt.succeed,
|
||||
succeeded,
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
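checkAltNamesExist, exercised by the table above, is a subset check: every required DNS name and IP must appear among the SANs the loaded certificate already carries, while extra entries are fine. A standalone sketch of the same logic (plain Go, not the kubeadm helper; the function name is made up):

package sansketch

import "net"

// requiredSANsCovered reports whether every required DNS name and IP address is
// already present among the SANs a certificate carries; extra SANs are allowed.
func requiredSANsCovered(haveIPs []net.IP, haveDNS []string, wantIPs []net.IP, wantDNS []string) bool {
	dns := make(map[string]bool, len(haveDNS))
	for _, d := range haveDNS {
		dns[d] = true
	}
	for _, d := range wantDNS {
		if !dns[d] {
			return false
		}
	}
	for _, want := range wantIPs {
		found := false
		for _, have := range haveIPs {
			if have.Equal(want) {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	}
	return true
}

Calling requiredSANsCovered(cert.IPAddresses, cert.DNSNames, want.IPs, want.DNSNames) on a parsed apiserver certificate would return false exactly in the stale-certificate cases covered by the last two table entries.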
13
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/doc.go
generated
vendored
|
@ -30,12 +30,9 @@ package certs
|
|||
|
||||
OUTPUTS:
|
||||
Files to PKIPath (default /etc/kubernetes/pki):
|
||||
- apiserver-key.pem
|
||||
- apiserver-pub.pem
|
||||
- apiserver.pem
|
||||
- ca-key.pem
|
||||
- ca-pub.pem
|
||||
- ca.pem
|
||||
- sa-key.pem
|
||||
- sa-pub.pem
|
||||
- ca.crt
|
||||
- ca.key
|
||||
- apiserver.crt
|
||||
- apiserver.key
|
||||
|
||||
*/
|
||||
|
|
110
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pki_helpers.go
generated
vendored
|
@ -1,110 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package certs
|
||||
|
||||
import (
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"path"
|
||||
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
)
|
||||
|
||||
func newCertificateAuthority() (*rsa.PrivateKey, *x509.Certificate, error) {
|
||||
key, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
|
||||
}
|
||||
|
||||
config := certutil.Config{
|
||||
CommonName: "kubernetes",
|
||||
}
|
||||
|
||||
cert, err := certutil.NewSelfSignedCACert(config, key)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create self-signed certificate [%v]", err)
|
||||
}
|
||||
|
||||
return key, cert, nil
|
||||
}
|
||||
|
||||
func newServerKeyAndCert(caCert *x509.Certificate, caKey *rsa.PrivateKey, altNames certutil.AltNames) (*rsa.PrivateKey, *x509.Certificate, error) {
|
||||
key, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
|
||||
}
|
||||
|
||||
config := certutil.Config{
|
||||
CommonName: "kube-apiserver",
|
||||
AltNames: altNames,
|
||||
}
|
||||
cert, err := certutil.NewSignedCert(config, key, caCert, caKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to sign certificate [%v]", err)
|
||||
}
|
||||
|
||||
return key, cert, nil
|
||||
}
|
||||
|
||||
func NewClientKeyAndCert(caCert *x509.Certificate, caKey *rsa.PrivateKey) (*rsa.PrivateKey, *x509.Certificate, error) {
|
||||
key, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
|
||||
}
|
||||
|
||||
config := certutil.Config{
|
||||
CommonName: "kubernetes-client",
|
||||
}
|
||||
cert, err := certutil.NewSignedCert(config, key, caCert, caKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to sign certificate [%v]", err)
|
||||
}
|
||||
|
||||
return key, cert, nil
|
||||
}
|
||||
|
||||
func writeKeysAndCert(pkiPath string, name string, key *rsa.PrivateKey, cert *x509.Certificate) error {
|
||||
publicKeyPath, privateKeyPath, certificatePath := pathsKeysCerts(pkiPath, name)
|
||||
|
||||
if key != nil {
|
||||
if err := certutil.WriteKey(privateKeyPath, certutil.EncodePrivateKeyPEM(key)); err != nil {
|
||||
return fmt.Errorf("unable to write private key file (%q) [%v]", privateKeyPath, err)
|
||||
}
|
||||
if pubKey, err := certutil.EncodePublicKeyPEM(&key.PublicKey); err == nil {
|
||||
if err := certutil.WriteKey(publicKeyPath, pubKey); err != nil {
|
||||
return fmt.Errorf("unable to write public key file (%q) [%v]", publicKeyPath, err)
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("unable to encode public key to PEM [%v]", err)
|
||||
}
|
||||
}
|
||||
|
||||
if cert != nil {
|
||||
if err := certutil.WriteCert(certificatePath, certutil.EncodeCertPEM(cert)); err != nil {
|
||||
return fmt.Errorf("unable to write certificate file (%q) [%v]", certificatePath, err)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func pathsKeysCerts(pkiPath, name string) (string, string, string) {
|
||||
return path.Join(pkiPath, fmt.Sprintf("%s-pub.pem", name)),
|
||||
path.Join(pkiPath, fmt.Sprintf("%s-key.pem", name)),
|
||||
path.Join(pkiPath, fmt.Sprintf("%s.pem", name))
|
||||
}
|
171
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pki_helpers_test.go
generated
vendored
|
@ -1,171 +0,0 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package certs
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
)
|
||||
|
||||
func TestNewCertificateAuthority(t *testing.T) {
|
||||
r, x, err := newCertificateAuthority()
|
||||
|
||||
if r == nil {
|
||||
t.Errorf(
|
||||
"failed newCertificateAuthority, rsa key == nil",
|
||||
)
|
||||
}
|
||||
if x == nil {
|
||||
t.Errorf(
|
||||
"failed newCertificateAuthority, x509 cert == nil",
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed newCertificateAuthority with an error: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewServerKeyAndCert(t *testing.T) {
|
||||
var tests = []struct {
|
||||
caKeySize int
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
// RSA key too small
|
||||
caKeySize: 128,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
// Should succeed
|
||||
caKeySize: 2048,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
caKey, err := rsa.GenerateKey(rand.Reader, rt.caKeySize)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create rsa Private Key")
|
||||
}
|
||||
caCert := &x509.Certificate{}
|
||||
altNames := certutil.AltNames{}
|
||||
_, _, actual := newServerKeyAndCert(caCert, caKey, altNames)
|
||||
if (actual == nil) != rt.expected {
|
||||
t.Errorf(
|
||||
"failed newServerKeyAndCert:\n\texpected: %t\n\t actual: %t",
|
||||
rt.expected,
|
||||
(actual == nil),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewClientKeyAndCert(t *testing.T) {
|
||||
var tests = []struct {
|
||||
caKeySize int
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
// RSA key too small
|
||||
caKeySize: 128,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
caKeySize: 2048,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
caKey, err := rsa.GenerateKey(rand.Reader, rt.caKeySize)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create rsa Private Key")
|
||||
}
|
||||
caCert := &x509.Certificate{}
|
||||
_, _, actual := NewClientKeyAndCert(caCert, caKey)
|
||||
if (actual == nil) != rt.expected {
|
||||
t.Errorf(
|
||||
"failed NewClientKeyAndCert:\n\texpected: %t\n\t actual: %t",
|
||||
rt.expected,
|
||||
(actual == nil),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteKeysAndCert(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create tmpdir")
|
||||
}
|
||||
defer os.Remove(tmpdir)
|
||||
|
||||
caKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create rsa Private Key")
|
||||
}
|
||||
caCert := &x509.Certificate{}
|
||||
actual := writeKeysAndCert(tmpdir, "foo", caKey, caCert)
|
||||
if actual != nil {
|
||||
t.Errorf(
|
||||
"failed writeKeysAndCert with an error: %v",
|
||||
actual,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPathsKeysCerts(t *testing.T) {
|
||||
var tests = []struct {
|
||||
pkiPath string
|
||||
name string
|
||||
expected []string
|
||||
}{
|
||||
{
|
||||
pkiPath: "foo",
|
||||
name: "bar",
|
||||
expected: []string{"foo/bar-pub.pem", "foo/bar-key.pem", "foo/bar.pem"},
|
||||
},
|
||||
{
|
||||
pkiPath: "bar",
|
||||
name: "foo",
|
||||
expected: []string{"bar/foo-pub.pem", "bar/foo-key.pem", "bar/foo.pem"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
a, b, c := pathsKeysCerts(rt.pkiPath, rt.name)
|
||||
all := []string{a, b, c}
|
||||
for i := range all {
|
||||
if all[i] != rt.expected[i] {
|
||||
t.Errorf(
|
||||
"failed pathsKeysCerts:\n\texpected: %s\n\t actual: %s",
|
||||
rt.expected[i],
|
||||
all[i],
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
37
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil/BUILD
generated
vendored
Normal file
|
@ -0,0 +1,37 @@
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
licenses(["notice"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
|
||||
go_test(
|
||||
name = "go_default_test",
|
||||
srcs = ["pki_helpers_test.go"],
|
||||
library = ":go_default_library",
|
||||
tags = ["automanaged"],
|
||||
deps = ["//vendor:k8s.io/client-go/util/cert"],
|
||||
)
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["pki_helpers.go"],
|
||||
tags = ["automanaged"],
|
||||
deps = ["//vendor:k8s.io/client-go/util/cert"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
)
|
145
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers.go
generated
vendored
Normal file
|
@ -0,0 +1,145 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pkiutil
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
)
|
||||
|
||||
// TODO: It should be able to generate different types of private keys, at least: RSA and ECDSA (and in the future maybe Ed25519 as well)
|
||||
// TODO: See if it makes sense to move this package directly to pkg/util/cert
|
||||
|
||||
func NewCertificateAuthority() (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
key, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
|
||||
}
|
||||
|
||||
config := certutil.Config{
|
||||
CommonName: "kubernetes",
|
||||
}
|
||||
cert, err := certutil.NewSelfSignedCACert(config, key)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create self-signed certificate [%v]", err)
|
||||
}
|
||||
|
||||
return cert, key, nil
|
||||
}
|
||||
|
||||
func NewCertAndKey(caCert *x509.Certificate, caKey *rsa.PrivateKey, config certutil.Config) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
key, err := certutil.NewPrivateKey()
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to create private key [%v]", err)
|
||||
}
|
||||
|
||||
cert, err := certutil.NewSignedCert(config, key, caCert, caKey)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("unable to sign certificate [%v]", err)
|
||||
}
|
||||
|
||||
return cert, key, nil
|
||||
}
|
||||
|
||||
func WriteCertAndKey(pkiPath string, name string, cert *x509.Certificate, key *rsa.PrivateKey) error {
|
||||
certificatePath, privateKeyPath := pathsForCertAndKey(pkiPath, name)
|
||||
|
||||
if cert == nil {
|
||||
return fmt.Errorf("certificate cannot be nil when writing to file")
|
||||
}
|
||||
if key == nil {
|
||||
return fmt.Errorf("private key cannot be nil when writing to file")
|
||||
}
|
||||
|
||||
if err := certutil.WriteKey(privateKeyPath, certutil.EncodePrivateKeyPEM(key)); err != nil {
|
||||
return fmt.Errorf("unable to write private key to file %q: [%v]", privateKeyPath, err)
|
||||
}
|
||||
|
||||
if err := certutil.WriteCert(certificatePath, certutil.EncodeCertPEM(cert)); err != nil {
|
||||
return fmt.Errorf("unable to write certificate to file %q: [%v]", certificatePath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CertOrKeyExist returns true if at least one of the certificate and the key exists
|
||||
func CertOrKeyExist(pkiPath, name string) bool {
|
||||
certificatePath, privateKeyPath := pathsForCertAndKey(pkiPath, name)
|
||||
|
||||
_, certErr := os.Stat(certificatePath)
|
||||
_, keyErr := os.Stat(privateKeyPath)
|
||||
if os.IsNotExist(certErr) && os.IsNotExist(keyErr) {
|
||||
// Neither the cert nor the key exists
|
||||
return false
|
||||
}
|
||||
|
||||
// At least one of the two files exists
|
||||
return true
|
||||
}
|
||||
|
||||
// TryLoadCertAndKeyFromDisk tries to load a cert and a key from the disk and validates that they are valid
|
||||
func TryLoadCertAndKeyFromDisk(pkiPath, name string) (*x509.Certificate, *rsa.PrivateKey, error) {
|
||||
certificatePath, privateKeyPath := pathsForCertAndKey(pkiPath, name)
|
||||
|
||||
certs, err := certutil.CertsFromFile(certificatePath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("couldn't load the certificate file %s: %v", certificatePath, err)
|
||||
}
|
||||
|
||||
// We are only putting one certificate in the certificate pem file, so it's safe to just pick the first one
|
||||
// TODO: Support multiple certs here in order to be able to rotate certs
|
||||
cert := certs[0]
|
||||
|
||||
// Parse the private key from a file
|
||||
privKey, err := certutil.PrivateKeyFromFile(privateKeyPath)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("couldn't load the private key file %s: %v", privateKeyPath, err)
|
||||
}
|
||||
var key *rsa.PrivateKey
|
||||
switch k := privKey.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
key = k
|
||||
case *ecdsa.PrivateKey:
|
||||
// TODO: Abstract rsa.PrivateKey away and make certutil.NewSignedCert accept a ecdsa.PrivateKey as well
|
||||
// After that, we can support generating kubeconfig files from ecdsa private keys as well
|
||||
return nil, nil, fmt.Errorf("the private key file %s isn't in RSA format", privateKeyPath)
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("the private key file %s isn't in RSA format", privateKeyPath)
|
||||
}
|
||||
|
||||
// Check that the certificate is currently valid
|
||||
now := time.Now()
|
||||
if now.Before(cert.NotBefore) {
|
||||
return nil, nil, fmt.Errorf("the certificate is not valid yet")
|
||||
}
|
||||
if now.After(cert.NotAfter) {
|
||||
return nil, nil, fmt.Errorf("the certificate is has expired")
|
||||
}
|
||||
|
||||
return cert, key, nil
|
||||
}
|
||||
|
||||
func pathsForCertAndKey(pkiPath, name string) (string, string) {
|
||||
return path.Join(pkiPath, fmt.Sprintf("%s.crt", name)), path.Join(pkiPath, fmt.Sprintf("%s.key", name))
|
||||
}
|
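TryLoadCertAndKeyFromDisk above combines three checks: both PEM files must parse, the key must be RSA (ECDSA is rejected for now), and the certificate must be inside its NotBefore/NotAfter window. A stripped-down standard-library sketch of that validation step (file handling and error wording are illustrative, and accepting PKCS#1 or PKCS#8 key encodings is an assumption):

package loadsketch

import (
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"os"
	"time"
)

// loadRSACertAndKey loads a PEM certificate/key pair and applies the same sanity
// checks as the helper above: parseable, RSA-only, and valid at time.Now().
func loadRSACertAndKey(certPath, keyPath string) (*x509.Certificate, *rsa.PrivateKey, error) {
	certPEM, err := os.ReadFile(certPath)
	if err != nil {
		return nil, nil, err
	}
	keyPEM, err := os.ReadFile(keyPath)
	if err != nil {
		return nil, nil, err
	}

	certBlock, _ := pem.Decode(certPEM)
	if certBlock == nil {
		return nil, nil, fmt.Errorf("%s is not valid PEM", certPath)
	}
	cert, err := x509.ParseCertificate(certBlock.Bytes)
	if err != nil {
		return nil, nil, err
	}

	keyBlock, _ := pem.Decode(keyPEM)
	if keyBlock == nil {
		return nil, nil, fmt.Errorf("%s is not valid PEM", keyPath)
	}
	// Insist on an RSA key, mirroring the type switch in TryLoadCertAndKeyFromDisk.
	var key *rsa.PrivateKey
	if k, err := x509.ParsePKCS1PrivateKey(keyBlock.Bytes); err == nil {
		key = k
	} else if pk, err := x509.ParsePKCS8PrivateKey(keyBlock.Bytes); err == nil {
		rsaKey, ok := pk.(*rsa.PrivateKey)
		if !ok {
			return nil, nil, fmt.Errorf("the private key file %s isn't in RSA format", keyPath)
		}
		key = rsaKey
	} else {
		return nil, nil, fmt.Errorf("couldn't parse the private key file %s", keyPath)
	}

	// Check that the certificate is currently valid.
	now := time.Now()
	if now.Before(cert.NotBefore) {
		return nil, nil, fmt.Errorf("the certificate is not valid yet")
	}
	if now.After(cert.NotAfter) {
		return nil, nil, fmt.Errorf("the certificate has expired")
	}
	return cert, key, nil
}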
109
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil/pki_helpers_test.go
generated
vendored
Normal file
|
@ -0,0 +1,109 @@
|
|||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package pkiutil
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
)
|
||||
|
||||
func TestNewCertificateAuthority(t *testing.T) {
|
||||
cert, key, err := NewCertificateAuthority()
|
||||
|
||||
if cert == nil {
|
||||
t.Errorf(
|
||||
"failed NewCertificateAuthority, cert == nil",
|
||||
)
|
||||
}
|
||||
if key == nil {
|
||||
t.Errorf(
|
||||
"failed NewCertificateAuthority, key == nil",
|
||||
)
|
||||
}
|
||||
if err != nil {
|
||||
t.Errorf(
|
||||
"failed NewCertificateAuthority with an error: %v",
|
||||
err,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewCertAndKey(t *testing.T) {
|
||||
var tests = []struct {
|
||||
caKeySize int
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
// RSA key too small
|
||||
caKeySize: 128,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
// Should succeed
|
||||
caKeySize: 2048,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, rt := range tests {
|
||||
caKey, err := rsa.GenerateKey(rand.Reader, rt.caKeySize)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create rsa Private Key")
|
||||
}
|
||||
caCert := &x509.Certificate{}
|
||||
config := certutil.Config{
|
||||
CommonName: "test",
|
||||
Organization: []string{"test"},
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
}
|
||||
_, _, actual := NewCertAndKey(caCert, caKey, config)
|
||||
if (actual == nil) != rt.expected {
|
||||
t.Errorf(
|
||||
"failed NewCertAndKey:\n\texpected: %t\n\t actual: %t",
|
||||
rt.expected,
|
||||
(actual == nil),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteCertAndKey(t *testing.T) {
|
||||
tmpdir, err := ioutil.TempDir("", "")
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create tmpdir")
|
||||
}
|
||||
defer os.Remove(tmpdir)
|
||||
|
||||
caKey, err := rsa.GenerateKey(rand.Reader, 2048)
|
||||
if err != nil {
|
||||
t.Fatalf("Couldn't create rsa Private Key")
|
||||
}
|
||||
caCert := &x509.Certificate{}
|
||||
actual := WriteCertAndKey(tmpdir, "foo", caCert, caKey)
|
||||
if actual != nil {
|
||||
t.Errorf(
|
||||
"failed WriteCertAndKey with an error: %v",
|
||||
actual,
|
||||
)
|
||||
}
|
||||
}
|
9
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/BUILD
generated
vendored
|
@ -24,10 +24,11 @@ go_library(
|
|||
],
|
||||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kubeadm/app/phases/certs:go_default_library",
|
||||
"//pkg/client/unversioned/clientcmd:go_default_library",
|
||||
"//pkg/client/unversioned/clientcmd/api:go_default_library",
|
||||
"//pkg/util/cert:go_default_library",
|
||||
"//cmd/kubeadm/app/constants:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/certs/pkiutil:go_default_library",
|
||||
"//vendor:k8s.io/client-go/tools/clientcmd",
|
||||
"//vendor:k8s.io/client-go/tools/clientcmd/api",
|
||||
"//vendor:k8s.io/client-go/util/cert",
|
||||
],
|
||||
)
|
||||
|
||||
|
|
166
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig.go
generated
vendored
|
@ -17,105 +17,155 @@ limitations under the License.
|
|||
package kubeconfig
|
||||
|
||||
import (
|
||||
"crypto/ecdsa"
|
||||
"bytes"
|
||||
"crypto/rsa"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
certphase "k8s.io/kubernetes/cmd/kubeadm/app/phases/certs"
|
||||
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
|
||||
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
|
||||
certutil "k8s.io/kubernetes/pkg/util/cert"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
|
||||
certutil "k8s.io/client-go/util/cert"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/certs/pkiutil"
|
||||
)
|
||||
|
||||
const (
|
||||
KubernetesDirPermissions = 0700
|
||||
AdminKubeConfigFileName = "admin.conf"
|
||||
KubeletKubeConfigFileName = "kubelet.conf"
|
||||
KubernetesDirPermissions = 0700
|
||||
KubeConfigFilePermissions = 0600
|
||||
AdminKubeConfigFileName = "admin.conf"
|
||||
AdminKubeConfigClientName = "kubernetes-admin"
|
||||
KubeletKubeConfigFileName = "kubelet.conf"
|
||||
KubeletKubeConfigClientName = "kubelet"
|
||||
)
|
||||
|
||||
// This function is called from the main init and does the work for the default phase behaviour
|
||||
// TODO: Make an integration test for this function that runs after the certificates phase
|
||||
// and makes sure that those two phases work well together...
|
||||
func CreateAdminAndKubeletKubeConfig(masterEndpoint, pkiDir, outDir string) error {
|
||||
// Parse the certificate from a file
|
||||
caCertPath := path.Join(pkiDir, "ca.pem")
|
||||
caCerts, err := certutil.CertsFromFile(caCertPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't load the CA cert file %s: %v", caCertPath, err)
|
||||
}
|
||||
// We are only putting one certificate in the CA certificate pem file, so it's safe to just use the first one
|
||||
caCert := caCerts[0]
|
||||
|
||||
// Parse the rsa private key from a file
|
||||
caKeyPath := path.Join(pkiDir, "ca-key.pem")
|
||||
priv, err := certutil.PrivateKeyFromFile(caKeyPath)
|
||||
if err != nil {
|
||||
return fmt.Errorf("couldn't load the CA private key file %s: %v", caKeyPath, err)
|
||||
}
|
||||
var caKey *rsa.PrivateKey
|
||||
switch k := priv.(type) {
|
||||
case *rsa.PrivateKey:
|
||||
caKey = k
|
||||
case *ecdsa.PrivateKey:
|
||||
// TODO: Abstract rsa.PrivateKey away and make certutil.NewSignedCert accept a ecdsa.PrivateKey as well
|
||||
// After that, we can support generating kubeconfig files from ecdsa private keys as well
|
||||
return fmt.Errorf("the CA private key file %s isn't in RSA format", caKeyPath)
|
||||
default:
|
||||
return fmt.Errorf("the CA private key file %s isn't in RSA format", caKeyPath)
|
||||
// TODO: Integration test cases:
|
||||
// /etc/kubernetes/{admin,kubelet}.conf don't exist => generate kubeconfig files
|
||||
// /etc/kubernetes/{admin,kubelet}.conf and certs in /etc/kubernetes/pki exist => don't touch anything as long as everything's valid
|
||||
// /etc/kubernetes/{admin,kubelet}.conf exist but the server URL is invalid in those files => error
|
||||
// /etc/kubernetes/{admin,kubelet}.conf exist but the CA cert doesn't match what's in the pki dir => error
|
||||
// /etc/kubernetes/{admin,kubelet}.conf exist but not certs => certs will be generated and conflict with the kubeconfig files => error
|
||||
|
||||
// CreateAdminAndKubeletKubeConfig is called from the main init and does the work for the default phase behaviour
|
||||
func CreateAdminAndKubeletKubeConfig(masterEndpoint, pkiDir, outDir string) error {
|
||||
|
||||
// Try to load ca.crt and ca.key from the PKI directory
|
||||
caCert, caKey, err := pkiutil.TryLoadCertAndKeyFromDisk(pkiDir, kubeadmconstants.CACertAndKeyBaseName)
|
||||
if err != nil || caCert == nil || caKey == nil {
|
||||
return fmt.Errorf("couldn't create a kubeconfig; the CA files couldn't be loaded: %v", err)
|
||||
}
|
||||
|
||||
// User admin should have full access to the cluster
|
||||
if err := createKubeConfigFileForClient(masterEndpoint, "admin", outDir, caCert, caKey); err != nil {
|
||||
return fmt.Errorf("couldn't create a kubeconfig file for admin: %v", err)
|
||||
// TODO: Add a test case that makes sure this cert has the x509.ExtKeyUsageClientAuth flag
|
||||
adminCertConfig := certutil.Config{
|
||||
CommonName: AdminKubeConfigClientName,
|
||||
Organization: []string{"system:masters"},
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
}
|
||||
adminKubeConfigFilePath := filepath.Join(outDir, AdminKubeConfigFileName)
|
||||
if err := createKubeConfigFileForClient(masterEndpoint, adminKubeConfigFilePath, adminCertConfig, caCert, caKey); err != nil {
|
||||
return fmt.Errorf("couldn't create config for %s: %v", AdminKubeConfigClientName, err)
|
||||
}
|
||||
|
||||
// TODO: The kubelet should have limited access to the cluster
|
||||
if err := createKubeConfigFileForClient(masterEndpoint, "kubelet", outDir, caCert, caKey); err != nil {
|
||||
return fmt.Errorf("couldn't create a kubeconfig file for kubelet: %v", err)
|
||||
// TODO: The kubelet should have limited access to the cluster. Right now, this gives kubelet basically root access
|
||||
// and we do need that in the bootstrap phase, but we should swap it out after the control plane is up
|
||||
// TODO: Add a test case that makes sure this cert has the x509.ExtKeyUsageClientAuth flag
|
||||
kubeletCertConfig := certutil.Config{
|
||||
CommonName: KubeletKubeConfigClientName,
|
||||
Organization: []string{"system:nodes"},
|
||||
Usages: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth},
|
||||
}
|
||||
kubeletKubeConfigFilePath := filepath.Join(outDir, KubeletKubeConfigFileName)
|
||||
if err := createKubeConfigFileForClient(masterEndpoint, kubeletKubeConfigFilePath, kubeletCertConfig, caCert, caKey); err != nil {
|
||||
return fmt.Errorf("couldn't create a kubeconfig file for %s: %v", KubeletKubeConfigClientName, err)
|
||||
}
|
||||
|
||||
// TODO make credentials for the controller-manager, scheduler and kube-proxy
|
||||
return nil
|
||||
}
|
||||
|
||||
func createKubeConfigFileForClient(masterEndpoint, client, outDir string, caCert *x509.Certificate, caKey *rsa.PrivateKey) error {
|
||||
key, cert, err := certphase.NewClientKeyAndCert(caCert, caKey)
|
||||
func createKubeConfigFileForClient(masterEndpoint, kubeConfigFilePath string, config certutil.Config, caCert *x509.Certificate, caKey *rsa.PrivateKey) error {
|
||||
cert, key, err := pkiutil.NewCertAndKey(caCert, caKey, config)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failure while creating %s client certificate [%v]", client, err)
|
||||
return fmt.Errorf("failure while creating %s client certificate [%v]", config.CommonName, err)
|
||||
}
|
||||
|
||||
config := MakeClientConfigWithCerts(
|
||||
kubeconfig := MakeClientConfigWithCerts(
|
||||
masterEndpoint,
|
||||
"kubernetes",
|
||||
client,
|
||||
config.CommonName,
|
||||
certutil.EncodeCertPEM(caCert),
|
||||
certutil.EncodePrivateKeyPEM(key),
|
||||
certutil.EncodeCertPEM(cert),
|
||||
)
|
||||
|
||||
// Write it now to a file
|
||||
filepath := path.Join(outDir, fmt.Sprintf("%s.conf", client))
|
||||
return WriteKubeconfigToDisk(filepath, config)
|
||||
// Write it now to a file if there already isn't a valid one
|
||||
return writeKubeconfigToDiskIfNotExists(kubeConfigFilePath, kubeconfig)
|
||||
}
|
||||
|
||||
func WriteKubeconfigToDisk(filepath string, kubeconfig *clientcmdapi.Config) error {
|
||||
// Make sure the dir exists or can be created
|
||||
if err := os.MkdirAll(path.Dir(filepath), KubernetesDirPermissions); err != nil {
|
||||
return fmt.Errorf("failed to create directory %q [%v]", path.Dir(filepath), err)
|
||||
func WriteKubeconfigToDisk(filename string, kubeconfig *clientcmdapi.Config) error {
|
||||
// Convert the KubeConfig object to a byte array
|
||||
content, err := clientcmd.Write(*kubeconfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If err == nil, the file exists. Oops, we don't allow the file to exist already, fail.
|
||||
if _, err := os.Stat(filepath); err == nil {
|
||||
return fmt.Errorf("kubeconfig file %s already exists, but must not exist.", filepath)
|
||||
// Create the directory if it does not exist
|
||||
dir := filepath.Dir(filename)
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
if err = os.MkdirAll(dir, KubernetesDirPermissions); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := clientcmd.WriteToFile(*kubeconfig, filepath); err != nil {
|
||||
return fmt.Errorf("failed to write to %q [%v]", filepath, err)
|
||||
// No such kubeconfig file exists yet; write the kubeconfig to disk
|
||||
if err := ioutil.WriteFile(filename, content, KubeConfigFilePermissions); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filepath)
|
||||
fmt.Printf("[kubeconfig] Wrote KubeConfig file to disk: %q\n", filename)
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeKubeconfigToDiskIfNotExists saves the KubeConfig struct to disk if there isn't any file at the given path
|
||||
// If there already is a KubeConfig file at the given path, kubeadm tries to load it and checks whether the values in the
|
||||
// existing and the expected config are equal. If they are, kubeadm will just skip writing the file as it's up-to-date,
|
||||
// but if a file exists but has old content or isn't a kubeconfig file, this function returns an error.
|
||||
func writeKubeconfigToDiskIfNotExists(filename string, expectedConfig *clientcmdapi.Config) error {
|
||||
// Check if the file exists, and if it doesn't, just write it to disk
|
||||
if _, err := os.Stat(filename); os.IsNotExist(err) {
|
||||
return WriteKubeconfigToDisk(filename, expectedConfig)
|
||||
}
|
||||
|
||||
// The kubeconfig already exists, so check whether it has the same CA and server URL
|
||||
currentConfig, err := clientcmd.LoadFromFile(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to load kubeconfig that already exists on disk [%v]", err)
|
||||
}
|
||||
|
||||
expectedCtx := expectedConfig.CurrentContext
|
||||
expectedCluster := expectedConfig.Contexts[expectedCtx].Cluster
|
||||
currentCtx := currentConfig.CurrentContext
|
||||
currentCluster := currentConfig.Contexts[currentCtx].Cluster
|
||||
// If the current CA cert on disk doesn't match the expected CA cert, error out because we have a file, but it's stale
|
||||
if !bytes.Equal(currentConfig.Clusters[currentCluster].CertificateAuthorityData, expectedConfig.Clusters[expectedCluster].CertificateAuthorityData) {
|
||||
return fmt.Errorf("a kubeconfig file %q exists already but has got the wrong CA cert", filename)
|
||||
}
|
||||
// If the current API Server location on disk doesn't match the expected API server, error out because we have a file, but it's stale
|
||||
if currentConfig.Clusters[currentCluster].Server != expectedConfig.Clusters[expectedCluster].Server {
|
||||
return fmt.Errorf("a kubeconfig file %q exists already but has got the wrong API Server URL", filename)
|
||||
}
|
||||
|
||||
// kubeadm doesn't validate the existing kubeconfig file more than this (kubeadm trusts the client certs to be valid)
|
||||
// Basically, if we find a kubeconfig file with the same path, the same CA cert and the same server URL,
|
||||
// kubeadm thinks those files are equal and doesn't bother writing a new file
|
||||
fmt.Printf("[kubeconfig] Using existing up-to-date KubeConfig file: %q\n", filename)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
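The reworked kubeconfig phase builds a clientcmdapi.Config with the CA and client certificate data embedded, serializes it with clientcmd.Write, and only writes the file when no up-to-date copy exists. Below is a hedged sketch of that construction using client-go's clientcmd types; the cluster/context naming is an assumption and this is not necessarily what MakeClientConfigWithCerts does internally.

package kubeconfigsketch

import (
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

// buildKubeConfig assembles a kubeconfig with embedded CA and client cert/key
// PEM data, roughly the role MakeClientConfigWithCerts plays above.
func buildKubeConfig(server, clusterName, userName string, caPEM, clientKeyPEM, clientCertPEM []byte) *clientcmdapi.Config {
	cfg := clientcmdapi.NewConfig()
	cfg.Clusters[clusterName] = &clientcmdapi.Cluster{
		Server:                   server,
		CertificateAuthorityData: caPEM,
	}
	cfg.AuthInfos[userName] = &clientcmdapi.AuthInfo{
		ClientCertificateData: clientCertPEM,
		ClientKeyData:         clientKeyPEM,
	}
	contextName := userName + "@" + clusterName
	cfg.Contexts[contextName] = &clientcmdapi.Context{
		Cluster:  clusterName,
		AuthInfo: userName,
	}
	cfg.CurrentContext = contextName
	return cfg
}

// serialize turns the config into the YAML bytes that get written to
// admin.conf / kubelet.conf with the restrictive file mode used above.
func serialize(cfg *clientcmdapi.Config) ([]byte, error) {
	return clientcmd.Write(*cfg)
}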
2
vendor/k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig/kubeconfig_test.go
generated
vendored
|
@ -142,7 +142,7 @@ func TestWriteKubeconfigToDisk(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("Couldn't create tmpdir")
|
||||
}
|
||||
defer os.Remove(tmpdir)
|
||||
defer os.RemoveAll(tmpdir)
|
||||
|
||||
// set up tmp GlobalEnvParams values for testing
|
||||
oldEnv := kubeadmapi.GlobalEnvParams
|
||||
|
|
2
vendor/k8s.io/kubernetes/cmd/kubeadm/app/preflight/BUILD
generated
vendored
|
@ -14,6 +14,8 @@ go_library(
|
|||
tags = ["automanaged"],
|
||||
deps = [
|
||||
"//cmd/kubeadm/app/apis/kubeadm:go_default_library",
|
||||
"//cmd/kubeadm/app/constants:go_default_library",
|
||||
"//cmd/kubeadm/app/phases/kubeconfig:go_default_library",
|
||||
"//pkg/api/validation:go_default_library",
|
||||
"//pkg/util/initsystem:go_default_library",
|
||||
"//pkg/util/node:go_default_library",
|
||||
|
|
16
vendor/k8s.io/kubernetes/cmd/kubeadm/app/preflight/checks.go
generated
vendored
|
@ -25,10 +25,12 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
|
||||
kubeadmconstants "k8s.io/kubernetes/cmd/kubeadm/app/constants"
|
||||
"k8s.io/kubernetes/cmd/kubeadm/app/phases/kubeconfig"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
"k8s.io/kubernetes/pkg/util/initsystem"
|
||||
"k8s.io/kubernetes/pkg/util/node"
|
||||
|
@ -212,7 +214,7 @@ func (fcc FileContentCheck) Check() (warnings, errors []error) {
|
|||
|
||||
}
|
||||
|
||||
// InPathCheck checks if the given executable is present in the path.
|
||||
// InPathCheck checks if the given executable is present in the path
|
||||
type InPathCheck struct {
|
||||
executable string
|
||||
mandatory bool
|
||||
|
@ -317,11 +319,8 @@ func RunInitMasterChecks(cfg *kubeadmapi.MasterConfiguration) error {
|
|||
PortOpenCheck{port: 10251},
|
||||
PortOpenCheck{port: 10252},
|
||||
HTTPProxyCheck{Proto: "https", Host: cfg.API.AdvertiseAddresses[0], Port: int(cfg.API.Port)},
|
||||
DirAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")},
|
||||
DirAvailableCheck{Path: kubeadmapi.GlobalEnvParams.HostPKIPath},
|
||||
DirAvailableCheck{Path: filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")},
|
||||
DirAvailableCheck{Path: "/var/lib/kubelet"},
|
||||
FileAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "admin.conf")},
|
||||
FileAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
|
||||
FileContentCheck{Path: bridgenf, Content: []byte{'1'}},
|
||||
InPathCheck{executable: "ip", mandatory: true},
|
||||
InPathCheck{executable: "iptables", mandatory: true},
|
||||
|
@ -353,9 +352,10 @@ func RunJoinNodeChecks(cfg *kubeadmapi.NodeConfiguration) error {
|
|||
ServiceCheck{Service: "kubelet", CheckIfActive: false},
|
||||
ServiceCheck{Service: "docker", CheckIfActive: true},
|
||||
PortOpenCheck{port: 10250},
|
||||
DirAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")},
|
||||
DirAvailableCheck{Path: filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "manifests")},
|
||||
DirAvailableCheck{Path: "/var/lib/kubelet"},
|
||||
FileAvailableCheck{Path: path.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, "kubelet.conf")},
|
||||
FileAvailableCheck{Path: filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeadmconstants.CACertName)},
|
||||
FileAvailableCheck{Path: filepath.Join(kubeadmapi.GlobalEnvParams.KubernetesDir, kubeconfig.KubeletKubeConfigFileName)},
|
||||
FileContentCheck{Path: bridgenf, Content: []byte{'1'}},
|
||||
InPathCheck{executable: "ip", mandatory: true},
|
||||
InPathCheck{executable: "iptables", mandatory: true},
|
||||
|
|
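RunInitMasterChecks and RunJoinNodeChecks above assemble lists of small checks (ports, directories, files, executables) that each report warnings and hard errors separately. The sketch below shows that shape with a hypothetical port-availability check; the Checker interface name and the runChecks helper are illustrative stand-ins, not kubeadm's exact types.

package preflightsketch

import (
	"fmt"
	"net"
)

// Checker mirrors the shape of the preflight checks listed above: each check
// returns a slice of warnings and a slice of hard errors.
type Checker interface {
	Check() (warnings, errors []error)
}

// portAvailableCheck is a PortOpenCheck-style example: it fails if something is
// already listening on the port.
type portAvailableCheck struct {
	port int
}

func (p portAvailableCheck) Check() (warnings, errors []error) {
	ln, err := net.Listen("tcp", fmt.Sprintf(":%d", p.port))
	if err != nil {
		return nil, []error{fmt.Errorf("port %d is in use: %v", p.port, err)}
	}
	ln.Close()
	return nil, nil
}

// runChecks aggregates the errors from every check, the same way the
// RunInitMasterChecks/RunJoinNodeChecks loops iterate over their check lists.
func runChecks(checks []Checker) []error {
	var all []error
	for _, c := range checks {
		_, errs := c.Check()
		all = append(all, errs...)
	}
	return all
}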
8
vendor/k8s.io/kubernetes/cmd/kubeadm/app/util/tokens.go
generated
vendored
|
@ -136,11 +136,11 @@ func UpdateOrCreateToken(client *clientset.Clientset, d *kubeadmapi.TokenDiscove
|
|||
secretName := fmt.Sprintf("%s%s", BootstrapTokenSecretPrefix, d.ID)
|
||||
var lastErr error
|
||||
for i := 0; i < tokenCreateRetries; i++ {
|
||||
secret, err := client.Secrets(api.NamespaceSystem).Get(secretName, metav1.GetOptions{})
|
||||
secret, err := client.Secrets(metav1.NamespaceSystem).Get(secretName, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
// Secret with this ID already exists, update it:
|
||||
secret.Data = encodeTokenSecretData(d, tokenDuration)
|
||||
if _, err := client.Secrets(api.NamespaceSystem).Update(secret); err == nil {
|
||||
if _, err := client.Secrets(metav1.NamespaceSystem).Update(secret); err == nil {
|
||||
return nil
|
||||
} else {
|
||||
lastErr = err
|
||||
|
@ -151,13 +151,13 @@ func UpdateOrCreateToken(client *clientset.Clientset, d *kubeadmapi.TokenDiscove
|
|||
// Secret does not already exist:
|
||||
if apierrors.IsNotFound(err) {
|
||||
secret = &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: secretName,
|
||||
},
|
||||
Type: api.SecretTypeBootstrapToken,
|
||||
Data: encodeTokenSecretData(d, tokenDuration),
|
||||
}
|
||||
if _, err := client.Secrets(api.NamespaceSystem).Create(secret); err == nil {
|
||||
if _, err := client.Secrets(metav1.NamespaceSystem).Create(secret); err == nil {
|
||||
return nil
|
||||
} else {
|
||||
lastErr = err
|
||||
|
|
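UpdateOrCreateToken above follows a get-then-update-or-create loop against the kube-system Secrets API, retrying a fixed number of times and remembering the last error. A sketch of the same pattern follows; note that it uses today's context-taking client-go signatures, whereas the vendored snapshot in this diff still uses the older context-free calls, and the retry count and helper name are assumptions.

package tokensketch

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const createRetries = 5

// updateOrCreateSecret updates the named secret's data if it exists, creates it
// if it doesn't, and retries a few times on transient failures.
func updateOrCreateSecret(ctx context.Context, client kubernetes.Interface, name string, data map[string][]byte) error {
	var lastErr error
	for i := 0; i < createRetries; i++ {
		secret, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Get(ctx, name, metav1.GetOptions{})
		if err == nil {
			// Secret with this name already exists, update it.
			secret.Data = data
			if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Update(ctx, secret, metav1.UpdateOptions{}); err == nil {
				return nil
			} else {
				lastErr = err
			}
			continue
		}
		if apierrors.IsNotFound(err) {
			// Secret does not exist yet, create it.
			secret := &corev1.Secret{
				ObjectMeta: metav1.ObjectMeta{Name: name},
				Type:       corev1.SecretTypeBootstrapToken,
				Data:       data,
			}
			if _, err := client.CoreV1().Secrets(metav1.NamespaceSystem).Create(ctx, secret, metav1.CreateOptions{}); err == nil {
				return nil
			} else {
				lastErr = err
			}
			continue
		}
		lastErr = err
	}
	return fmt.Errorf("unable to update or create the secret %q: %v", name, lastErr)
}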